1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 #include <linux/devcoredump.h>
36 #include <generated/utsrelease.h>
37 #include <linux/pci-p2pdma.h>
38 
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_probe_helper.h>
41 #include <drm/amdgpu_drm.h>
42 #include <linux/vgaarb.h>
43 #include <linux/vga_switcheroo.h>
44 #include <linux/efi.h>
45 #include "amdgpu.h"
46 #include "amdgpu_trace.h"
47 #include "amdgpu_i2c.h"
48 #include "atom.h"
49 #include "amdgpu_atombios.h"
50 #include "amdgpu_atomfirmware.h"
51 #include "amd_pcie.h"
52 #ifdef CONFIG_DRM_AMDGPU_SI
53 #include "si.h"
54 #endif
55 #ifdef CONFIG_DRM_AMDGPU_CIK
56 #include "cik.h"
57 #endif
58 #include "vi.h"
59 #include "soc15.h"
60 #include "nv.h"
61 #include "bif/bif_4_1_d.h"
62 #include <linux/firmware.h>
63 #include "amdgpu_vf_error.h"
64 
65 #include "amdgpu_amdkfd.h"
66 #include "amdgpu_pm.h"
67 
68 #include "amdgpu_xgmi.h"
69 #include "amdgpu_ras.h"
70 #include "amdgpu_pmu.h"
71 #include "amdgpu_fru_eeprom.h"
72 #include "amdgpu_reset.h"
73 
74 #include <linux/suspend.h>
75 #include <drm/task_barrier.h>
76 #include <linux/pm_runtime.h>
77 
78 #include <drm/drm_drv.h>
79 
80 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
87 
88 #define AMDGPU_RESUME_MS		2000
89 #define AMDGPU_MAX_RETRY_LIMIT		2
90 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
91 
92 const char *amdgpu_asic_name[] = {
93 	"TAHITI",
94 	"PITCAIRN",
95 	"VERDE",
96 	"OLAND",
97 	"HAINAN",
98 	"BONAIRE",
99 	"KAVERI",
100 	"KABINI",
101 	"HAWAII",
102 	"MULLINS",
103 	"TOPAZ",
104 	"TONGA",
105 	"FIJI",
106 	"CARRIZO",
107 	"STONEY",
108 	"POLARIS10",
109 	"POLARIS11",
110 	"POLARIS12",
111 	"VEGAM",
112 	"VEGA10",
113 	"VEGA12",
114 	"VEGA20",
115 	"RAVEN",
116 	"ARCTURUS",
117 	"RENOIR",
118 	"ALDEBARAN",
119 	"NAVI10",
120 	"CYAN_SKILLFISH",
121 	"NAVI14",
122 	"NAVI12",
123 	"SIENNA_CICHLID",
124 	"NAVY_FLOUNDER",
125 	"VANGOGH",
126 	"DIMGREY_CAVEFISH",
127 	"BEIGE_GOBY",
128 	"YELLOW_CARP",
129 	"IP DISCOVERY",
130 	"LAST",
131 };
132 
133 /**
134  * DOC: pcie_replay_count
135  *
136  * The amdgpu driver provides a sysfs API for reporting the total number
137  * of PCIe replays (NAKs)
138  * The file pcie_replay_count is used for this and returns the total
139  * number of replays as a sum of the NAKs generated and NAKs received
140  */
141 
142 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
143 		struct device_attribute *attr, char *buf)
144 {
145 	struct drm_device *ddev = dev_get_drvdata(dev);
146 	struct amdgpu_device *adev = drm_to_adev(ddev);
147 	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
148 
149 	return sysfs_emit(buf, "%llu\n", cnt);
150 }
151 
152 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
153 		amdgpu_device_get_pcie_replay_count, NULL);
154 
155 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
156 
157 /**
158  * DOC: product_name
159  *
160  * The amdgpu driver provides a sysfs API for reporting the product name
161  * for the device
162  * The file serial_number is used for this and returns the product name
163  * as returned from the FRU.
164  * NOTE: This is only available for certain server cards
165  */
166 
167 static ssize_t amdgpu_device_get_product_name(struct device *dev,
168 		struct device_attribute *attr, char *buf)
169 {
170 	struct drm_device *ddev = dev_get_drvdata(dev);
171 	struct amdgpu_device *adev = drm_to_adev(ddev);
172 
173 	return sysfs_emit(buf, "%s\n", adev->product_name);
174 }
175 
176 static DEVICE_ATTR(product_name, S_IRUGO,
177 		amdgpu_device_get_product_name, NULL);
178 
179 /**
180  * DOC: product_number
181  *
182  * The amdgpu driver provides a sysfs API for reporting the part number
183  * for the device
184  * The file serial_number is used for this and returns the part number
185  * as returned from the FRU.
186  * NOTE: This is only available for certain server cards
187  */
188 
189 static ssize_t amdgpu_device_get_product_number(struct device *dev,
190 		struct device_attribute *attr, char *buf)
191 {
192 	struct drm_device *ddev = dev_get_drvdata(dev);
193 	struct amdgpu_device *adev = drm_to_adev(ddev);
194 
195 	return sysfs_emit(buf, "%s\n", adev->product_number);
196 }
197 
198 static DEVICE_ATTR(product_number, S_IRUGO,
199 		amdgpu_device_get_product_number, NULL);
200 
201 /**
202  * DOC: serial_number
203  *
204  * The amdgpu driver provides a sysfs API for reporting the serial number
205  * for the device
206  * The file serial_number is used for this and returns the serial number
207  * as returned from the FRU.
208  * NOTE: This is only available for certain server cards
209  */
210 
211 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
212 		struct device_attribute *attr, char *buf)
213 {
214 	struct drm_device *ddev = dev_get_drvdata(dev);
215 	struct amdgpu_device *adev = drm_to_adev(ddev);
216 
217 	return sysfs_emit(buf, "%s\n", adev->serial);
218 }
219 
220 static DEVICE_ATTR(serial_number, S_IRUGO,
221 		amdgpu_device_get_serial_number, NULL);
222 
223 /**
224  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
225  *
226  * @dev: drm_device pointer
227  *
228  * Returns true if the device is a dGPU with ATPX power control,
229  * otherwise return false.
230  */
231 bool amdgpu_device_supports_px(struct drm_device *dev)
232 {
233 	struct amdgpu_device *adev = drm_to_adev(dev);
234 
235 	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
236 		return true;
237 	return false;
238 }
239 
240 /**
241  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
242  *
243  * @dev: drm_device pointer
244  *
245  * Returns true if the device is a dGPU with ACPI power control,
246  * otherwise return false.
247  */
248 bool amdgpu_device_supports_boco(struct drm_device *dev)
249 {
250 	struct amdgpu_device *adev = drm_to_adev(dev);
251 
252 	if (adev->has_pr3 ||
253 	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
254 		return true;
255 	return false;
256 }
257 
258 /**
259  * amdgpu_device_supports_baco - Does the device support BACO
260  *
261  * @dev: drm_device pointer
262  *
263  * Returns true if the device supporte BACO,
264  * otherwise return false.
265  */
266 bool amdgpu_device_supports_baco(struct drm_device *dev)
267 {
268 	struct amdgpu_device *adev = drm_to_adev(dev);
269 
270 	return amdgpu_asic_supports_baco(adev);
271 }
272 
273 /**
274  * amdgpu_device_supports_smart_shift - Is the device dGPU with
275  * smart shift support
276  *
277  * @dev: drm_device pointer
278  *
279  * Returns true if the device is a dGPU with Smart Shift support,
280  * otherwise returns false.
281  */
282 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
283 {
284 	return (amdgpu_device_supports_boco(dev) &&
285 		amdgpu_acpi_is_power_shift_control_supported());
286 }
287 
288 /*
289  * VRAM access helper functions
290  */
291 
292 /**
293  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
294  *
295  * @adev: amdgpu_device pointer
296  * @pos: offset of the buffer in vram
297  * @buf: virtual address of the buffer in system memory
298  * @size: read/write size, sizeof(@buf) must > @size
299  * @write: true - write to vram, otherwise - read from vram
300  */
301 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
302 			     void *buf, size_t size, bool write)
303 {
304 	unsigned long flags;
305 	uint32_t hi = ~0, tmp = 0;
306 	uint32_t *data = buf;
307 	uint64_t last;
308 	int idx;
309 
310 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
311 		return;
312 
313 	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
314 
315 	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
316 	for (last = pos + size; pos < last; pos += 4) {
317 		tmp = pos >> 31;
318 
319 		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
320 		if (tmp != hi) {
321 			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
322 			hi = tmp;
323 		}
324 		if (write)
325 			WREG32_NO_KIQ(mmMM_DATA, *data++);
326 		else
327 			*data++ = RREG32_NO_KIQ(mmMM_DATA);
328 	}
329 
330 	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
331 	drm_dev_exit(idx);
332 }
333 
334 /**
335  * amdgpu_device_aper_access - access vram by vram aperature
336  *
337  * @adev: amdgpu_device pointer
338  * @pos: offset of the buffer in vram
339  * @buf: virtual address of the buffer in system memory
340  * @size: read/write size, sizeof(@buf) must > @size
341  * @write: true - write to vram, otherwise - read from vram
342  *
343  * The return value means how many bytes have been transferred.
344  */
345 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
346 				 void *buf, size_t size, bool write)
347 {
348 #ifdef CONFIG_64BIT
349 	void __iomem *addr;
350 	size_t count = 0;
351 	uint64_t last;
352 
353 	if (!adev->mman.aper_base_kaddr)
354 		return 0;
355 
356 	last = min(pos + size, adev->gmc.visible_vram_size);
357 	if (last > pos) {
358 		addr = adev->mman.aper_base_kaddr + pos;
359 		count = last - pos;
360 
361 		if (write) {
362 			memcpy_toio(addr, buf, count);
363 			mb();
364 			amdgpu_device_flush_hdp(adev, NULL);
365 		} else {
366 			amdgpu_device_invalidate_hdp(adev, NULL);
367 			mb();
368 			memcpy_fromio(buf, addr, count);
369 		}
370 
371 	}
372 
373 	return count;
374 #else
375 	return 0;
376 #endif
377 }
378 
379 /**
380  * amdgpu_device_vram_access - read/write a buffer in vram
381  *
382  * @adev: amdgpu_device pointer
383  * @pos: offset of the buffer in vram
384  * @buf: virtual address of the buffer in system memory
385  * @size: read/write size, sizeof(@buf) must > @size
386  * @write: true - write to vram, otherwise - read from vram
387  */
388 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
389 			       void *buf, size_t size, bool write)
390 {
391 	size_t count;
392 
393 	/* try to using vram apreature to access vram first */
394 	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
395 	size -= count;
396 	if (size) {
397 		/* using MM to access rest vram */
398 		pos += count;
399 		buf += count;
400 		amdgpu_device_mm_access(adev, pos, buf, size, write);
401 	}
402 }
403 
404 /*
405  * register access helper functions.
406  */
407 
408 /* Check if hw access should be skipped because of hotplug or device error */
409 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
410 {
411 	if (adev->no_hw_access)
412 		return true;
413 
414 #ifdef CONFIG_LOCKDEP
415 	/*
416 	 * This is a bit complicated to understand, so worth a comment. What we assert
417 	 * here is that the GPU reset is not running on another thread in parallel.
418 	 *
419 	 * For this we trylock the read side of the reset semaphore, if that succeeds
420 	 * we know that the reset is not running in paralell.
421 	 *
422 	 * If the trylock fails we assert that we are either already holding the read
423 	 * side of the lock or are the reset thread itself and hold the write side of
424 	 * the lock.
425 	 */
426 	if (in_task()) {
427 		if (down_read_trylock(&adev->reset_domain->sem))
428 			up_read(&adev->reset_domain->sem);
429 		else
430 			lockdep_assert_held(&adev->reset_domain->sem);
431 	}
432 #endif
433 	return false;
434 }
435 
436 /**
437  * amdgpu_device_rreg - read a memory mapped IO or indirect register
438  *
439  * @adev: amdgpu_device pointer
440  * @reg: dword aligned register offset
441  * @acc_flags: access flags which require special behavior
442  *
443  * Returns the 32 bit value from the offset specified.
444  */
445 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
446 			    uint32_t reg, uint32_t acc_flags)
447 {
448 	uint32_t ret;
449 
450 	if (amdgpu_device_skip_hw_access(adev))
451 		return 0;
452 
453 	if ((reg * 4) < adev->rmmio_size) {
454 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
455 		    amdgpu_sriov_runtime(adev) &&
456 		    down_read_trylock(&adev->reset_domain->sem)) {
457 			ret = amdgpu_kiq_rreg(adev, reg);
458 			up_read(&adev->reset_domain->sem);
459 		} else {
460 			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
461 		}
462 	} else {
463 		ret = adev->pcie_rreg(adev, reg * 4);
464 	}
465 
466 	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
467 
468 	return ret;
469 }
470 
471 /*
472  * MMIO register read with bytes helper functions
473  * @offset:bytes offset from MMIO start
474  *
475 */
476 
477 /**
478  * amdgpu_mm_rreg8 - read a memory mapped IO register
479  *
480  * @adev: amdgpu_device pointer
481  * @offset: byte aligned register offset
482  *
483  * Returns the 8 bit value from the offset specified.
484  */
485 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
486 {
487 	if (amdgpu_device_skip_hw_access(adev))
488 		return 0;
489 
490 	if (offset < adev->rmmio_size)
491 		return (readb(adev->rmmio + offset));
492 	BUG();
493 }
494 
495 /*
496  * MMIO register write with bytes helper functions
497  * @offset:bytes offset from MMIO start
498  * @value: the value want to be written to the register
499  *
500 */
501 /**
502  * amdgpu_mm_wreg8 - read a memory mapped IO register
503  *
504  * @adev: amdgpu_device pointer
505  * @offset: byte aligned register offset
506  * @value: 8 bit value to write
507  *
508  * Writes the value specified to the offset specified.
509  */
510 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
511 {
512 	if (amdgpu_device_skip_hw_access(adev))
513 		return;
514 
515 	if (offset < adev->rmmio_size)
516 		writeb(value, adev->rmmio + offset);
517 	else
518 		BUG();
519 }
520 
521 /**
522  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
523  *
524  * @adev: amdgpu_device pointer
525  * @reg: dword aligned register offset
526  * @v: 32 bit value to write to the register
527  * @acc_flags: access flags which require special behavior
528  *
529  * Writes the value specified to the offset specified.
530  */
531 void amdgpu_device_wreg(struct amdgpu_device *adev,
532 			uint32_t reg, uint32_t v,
533 			uint32_t acc_flags)
534 {
535 	if (amdgpu_device_skip_hw_access(adev))
536 		return;
537 
538 	if ((reg * 4) < adev->rmmio_size) {
539 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
540 		    amdgpu_sriov_runtime(adev) &&
541 		    down_read_trylock(&adev->reset_domain->sem)) {
542 			amdgpu_kiq_wreg(adev, reg, v);
543 			up_read(&adev->reset_domain->sem);
544 		} else {
545 			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
546 		}
547 	} else {
548 		adev->pcie_wreg(adev, reg * 4, v);
549 	}
550 
551 	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
552 }
553 
554 /**
555  * amdgpu_mm_wreg_mmio_rlc -  write register either with direct/indirect mmio or with RLC path if in range
556  *
557  * @adev: amdgpu_device pointer
558  * @reg: mmio/rlc register
559  * @v: value to write
560  *
561  * this function is invoked only for the debugfs register access
562  */
563 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
564 			     uint32_t reg, uint32_t v)
565 {
566 	if (amdgpu_device_skip_hw_access(adev))
567 		return;
568 
569 	if (amdgpu_sriov_fullaccess(adev) &&
570 	    adev->gfx.rlc.funcs &&
571 	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
572 		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
573 			return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
574 	} else if ((reg * 4) >= adev->rmmio_size) {
575 		adev->pcie_wreg(adev, reg * 4, v);
576 	} else {
577 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
578 	}
579 }
580 
581 /**
582  * amdgpu_mm_rdoorbell - read a doorbell dword
583  *
584  * @adev: amdgpu_device pointer
585  * @index: doorbell index
586  *
587  * Returns the value in the doorbell aperture at the
588  * requested doorbell index (CIK).
589  */
590 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
591 {
592 	if (amdgpu_device_skip_hw_access(adev))
593 		return 0;
594 
595 	if (index < adev->doorbell.num_doorbells) {
596 		return readl(adev->doorbell.ptr + index);
597 	} else {
598 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
599 		return 0;
600 	}
601 }
602 
603 /**
604  * amdgpu_mm_wdoorbell - write a doorbell dword
605  *
606  * @adev: amdgpu_device pointer
607  * @index: doorbell index
608  * @v: value to write
609  *
610  * Writes @v to the doorbell aperture at the
611  * requested doorbell index (CIK).
612  */
613 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
614 {
615 	if (amdgpu_device_skip_hw_access(adev))
616 		return;
617 
618 	if (index < adev->doorbell.num_doorbells) {
619 		writel(v, adev->doorbell.ptr + index);
620 	} else {
621 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
622 	}
623 }
624 
625 /**
626  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
627  *
628  * @adev: amdgpu_device pointer
629  * @index: doorbell index
630  *
631  * Returns the value in the doorbell aperture at the
632  * requested doorbell index (VEGA10+).
633  */
634 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
635 {
636 	if (amdgpu_device_skip_hw_access(adev))
637 		return 0;
638 
639 	if (index < adev->doorbell.num_doorbells) {
640 		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
641 	} else {
642 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
643 		return 0;
644 	}
645 }
646 
647 /**
648  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
649  *
650  * @adev: amdgpu_device pointer
651  * @index: doorbell index
652  * @v: value to write
653  *
654  * Writes @v to the doorbell aperture at the
655  * requested doorbell index (VEGA10+).
656  */
657 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
658 {
659 	if (amdgpu_device_skip_hw_access(adev))
660 		return;
661 
662 	if (index < adev->doorbell.num_doorbells) {
663 		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
664 	} else {
665 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
666 	}
667 }
668 
669 /**
670  * amdgpu_device_indirect_rreg - read an indirect register
671  *
672  * @adev: amdgpu_device pointer
673  * @pcie_index: mmio register offset
674  * @pcie_data: mmio register offset
675  * @reg_addr: indirect register address to read from
676  *
677  * Returns the value of indirect register @reg_addr
678  */
679 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
680 				u32 pcie_index, u32 pcie_data,
681 				u32 reg_addr)
682 {
683 	unsigned long flags;
684 	u32 r;
685 	void __iomem *pcie_index_offset;
686 	void __iomem *pcie_data_offset;
687 
688 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
689 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
690 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
691 
692 	writel(reg_addr, pcie_index_offset);
693 	readl(pcie_index_offset);
694 	r = readl(pcie_data_offset);
695 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
696 
697 	return r;
698 }
699 
700 /**
701  * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
702  *
703  * @adev: amdgpu_device pointer
704  * @pcie_index: mmio register offset
705  * @pcie_data: mmio register offset
706  * @reg_addr: indirect register address to read from
707  *
708  * Returns the value of indirect register @reg_addr
709  */
710 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
711 				  u32 pcie_index, u32 pcie_data,
712 				  u32 reg_addr)
713 {
714 	unsigned long flags;
715 	u64 r;
716 	void __iomem *pcie_index_offset;
717 	void __iomem *pcie_data_offset;
718 
719 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
720 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
721 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
722 
723 	/* read low 32 bits */
724 	writel(reg_addr, pcie_index_offset);
725 	readl(pcie_index_offset);
726 	r = readl(pcie_data_offset);
727 	/* read high 32 bits */
728 	writel(reg_addr + 4, pcie_index_offset);
729 	readl(pcie_index_offset);
730 	r |= ((u64)readl(pcie_data_offset) << 32);
731 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
732 
733 	return r;
734 }
735 
736 /**
737  * amdgpu_device_indirect_wreg - write an indirect register address
738  *
739  * @adev: amdgpu_device pointer
740  * @pcie_index: mmio register offset
741  * @pcie_data: mmio register offset
742  * @reg_addr: indirect register offset
743  * @reg_data: indirect register data
744  *
745  */
746 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
747 				 u32 pcie_index, u32 pcie_data,
748 				 u32 reg_addr, u32 reg_data)
749 {
750 	unsigned long flags;
751 	void __iomem *pcie_index_offset;
752 	void __iomem *pcie_data_offset;
753 
754 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
755 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
756 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
757 
758 	writel(reg_addr, pcie_index_offset);
759 	readl(pcie_index_offset);
760 	writel(reg_data, pcie_data_offset);
761 	readl(pcie_data_offset);
762 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
763 }
764 
765 /**
766  * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
767  *
768  * @adev: amdgpu_device pointer
769  * @pcie_index: mmio register offset
770  * @pcie_data: mmio register offset
771  * @reg_addr: indirect register offset
772  * @reg_data: indirect register data
773  *
774  */
775 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
776 				   u32 pcie_index, u32 pcie_data,
777 				   u32 reg_addr, u64 reg_data)
778 {
779 	unsigned long flags;
780 	void __iomem *pcie_index_offset;
781 	void __iomem *pcie_data_offset;
782 
783 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
784 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
785 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
786 
787 	/* write low 32 bits */
788 	writel(reg_addr, pcie_index_offset);
789 	readl(pcie_index_offset);
790 	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
791 	readl(pcie_data_offset);
792 	/* write high 32 bits */
793 	writel(reg_addr + 4, pcie_index_offset);
794 	readl(pcie_index_offset);
795 	writel((u32)(reg_data >> 32), pcie_data_offset);
796 	readl(pcie_data_offset);
797 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
798 }
799 
800 /**
801  * amdgpu_invalid_rreg - dummy reg read function
802  *
803  * @adev: amdgpu_device pointer
804  * @reg: offset of register
805  *
806  * Dummy register read function.  Used for register blocks
807  * that certain asics don't have (all asics).
808  * Returns the value in the register.
809  */
810 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
811 {
812 	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
813 	BUG();
814 	return 0;
815 }
816 
817 /**
818  * amdgpu_invalid_wreg - dummy reg write function
819  *
820  * @adev: amdgpu_device pointer
821  * @reg: offset of register
822  * @v: value to write to the register
823  *
824  * Dummy register read function.  Used for register blocks
825  * that certain asics don't have (all asics).
826  */
827 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
828 {
829 	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
830 		  reg, v);
831 	BUG();
832 }
833 
834 /**
835  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
836  *
837  * @adev: amdgpu_device pointer
838  * @reg: offset of register
839  *
840  * Dummy register read function.  Used for register blocks
841  * that certain asics don't have (all asics).
842  * Returns the value in the register.
843  */
844 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
845 {
846 	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
847 	BUG();
848 	return 0;
849 }
850 
851 /**
852  * amdgpu_invalid_wreg64 - dummy reg write function
853  *
854  * @adev: amdgpu_device pointer
855  * @reg: offset of register
856  * @v: value to write to the register
857  *
858  * Dummy register read function.  Used for register blocks
859  * that certain asics don't have (all asics).
860  */
861 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
862 {
863 	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
864 		  reg, v);
865 	BUG();
866 }
867 
868 /**
869  * amdgpu_block_invalid_rreg - dummy reg read function
870  *
871  * @adev: amdgpu_device pointer
872  * @block: offset of instance
873  * @reg: offset of register
874  *
875  * Dummy register read function.  Used for register blocks
876  * that certain asics don't have (all asics).
877  * Returns the value in the register.
878  */
879 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
880 					  uint32_t block, uint32_t reg)
881 {
882 	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
883 		  reg, block);
884 	BUG();
885 	return 0;
886 }
887 
888 /**
889  * amdgpu_block_invalid_wreg - dummy reg write function
890  *
891  * @adev: amdgpu_device pointer
892  * @block: offset of instance
893  * @reg: offset of register
894  * @v: value to write to the register
895  *
896  * Dummy register read function.  Used for register blocks
897  * that certain asics don't have (all asics).
898  */
899 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
900 				      uint32_t block,
901 				      uint32_t reg, uint32_t v)
902 {
903 	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
904 		  reg, block, v);
905 	BUG();
906 }
907 
908 /**
909  * amdgpu_device_asic_init - Wrapper for atom asic_init
910  *
911  * @adev: amdgpu_device pointer
912  *
913  * Does any asic specific work and then calls atom asic init.
914  */
915 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
916 {
917 	amdgpu_asic_pre_asic_init(adev);
918 
919 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
920 		return amdgpu_atomfirmware_asic_init(adev, true);
921 	else
922 		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
923 }
924 
925 /**
926  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
927  *
928  * @adev: amdgpu_device pointer
929  *
930  * Allocates a scratch page of VRAM for use by various things in the
931  * driver.
932  */
933 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
934 {
935 	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
936 				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
937 				       &adev->vram_scratch.robj,
938 				       &adev->vram_scratch.gpu_addr,
939 				       (void **)&adev->vram_scratch.ptr);
940 }
941 
942 /**
943  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
944  *
945  * @adev: amdgpu_device pointer
946  *
947  * Frees the VRAM scratch page.
948  */
949 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
950 {
951 	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
952 }
953 
954 /**
955  * amdgpu_device_program_register_sequence - program an array of registers.
956  *
957  * @adev: amdgpu_device pointer
958  * @registers: pointer to the register array
959  * @array_size: size of the register array
960  *
961  * Programs an array or registers with and and or masks.
962  * This is a helper for setting golden registers.
963  */
964 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
965 					     const u32 *registers,
966 					     const u32 array_size)
967 {
968 	u32 tmp, reg, and_mask, or_mask;
969 	int i;
970 
971 	if (array_size % 3)
972 		return;
973 
974 	for (i = 0; i < array_size; i +=3) {
975 		reg = registers[i + 0];
976 		and_mask = registers[i + 1];
977 		or_mask = registers[i + 2];
978 
979 		if (and_mask == 0xffffffff) {
980 			tmp = or_mask;
981 		} else {
982 			tmp = RREG32(reg);
983 			tmp &= ~and_mask;
984 			if (adev->family >= AMDGPU_FAMILY_AI)
985 				tmp |= (or_mask & and_mask);
986 			else
987 				tmp |= or_mask;
988 		}
989 		WREG32(reg, tmp);
990 	}
991 }
992 
993 /**
994  * amdgpu_device_pci_config_reset - reset the GPU
995  *
996  * @adev: amdgpu_device pointer
997  *
998  * Resets the GPU using the pci config reset sequence.
999  * Only applicable to asics prior to vega10.
1000  */
1001 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1002 {
1003 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1004 }
1005 
1006 /**
1007  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1008  *
1009  * @adev: amdgpu_device pointer
1010  *
1011  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1012  */
1013 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1014 {
1015 	return pci_reset_function(adev->pdev);
1016 }
1017 
1018 /*
1019  * GPU doorbell aperture helpers function.
1020  */
1021 /**
1022  * amdgpu_device_doorbell_init - Init doorbell driver information.
1023  *
1024  * @adev: amdgpu_device pointer
1025  *
1026  * Init doorbell driver information (CIK)
1027  * Returns 0 on success, error on failure.
1028  */
1029 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1030 {
1031 
1032 	/* No doorbell on SI hardware generation */
1033 	if (adev->asic_type < CHIP_BONAIRE) {
1034 		adev->doorbell.base = 0;
1035 		adev->doorbell.size = 0;
1036 		adev->doorbell.num_doorbells = 0;
1037 		adev->doorbell.ptr = NULL;
1038 		return 0;
1039 	}
1040 
1041 	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1042 		return -EINVAL;
1043 
1044 	amdgpu_asic_init_doorbell_index(adev);
1045 
1046 	/* doorbell bar mapping */
1047 	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1048 	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1049 
1050 	if (adev->enable_mes) {
1051 		adev->doorbell.num_doorbells =
1052 			adev->doorbell.size / sizeof(u32);
1053 	} else {
1054 		adev->doorbell.num_doorbells =
1055 			min_t(u32, adev->doorbell.size / sizeof(u32),
1056 			      adev->doorbell_index.max_assignment+1);
1057 		if (adev->doorbell.num_doorbells == 0)
1058 			return -EINVAL;
1059 
1060 		/* For Vega, reserve and map two pages on doorbell BAR since SDMA
1061 		 * paging queue doorbell use the second page. The
1062 		 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1063 		 * doorbells are in the first page. So with paging queue enabled,
1064 		 * the max num_doorbells should + 1 page (0x400 in dword)
1065 		 */
1066 		if (adev->asic_type >= CHIP_VEGA10)
1067 			adev->doorbell.num_doorbells += 0x400;
1068 	}
1069 
1070 	adev->doorbell.ptr = ioremap(adev->doorbell.base,
1071 				     adev->doorbell.num_doorbells *
1072 				     sizeof(u32));
1073 	if (adev->doorbell.ptr == NULL)
1074 		return -ENOMEM;
1075 
1076 	return 0;
1077 }
1078 
1079 /**
1080  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1081  *
1082  * @adev: amdgpu_device pointer
1083  *
1084  * Tear down doorbell driver information (CIK)
1085  */
1086 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1087 {
1088 	iounmap(adev->doorbell.ptr);
1089 	adev->doorbell.ptr = NULL;
1090 }
1091 
1092 
1093 
1094 /*
1095  * amdgpu_device_wb_*()
1096  * Writeback is the method by which the GPU updates special pages in memory
1097  * with the status of certain GPU events (fences, ring pointers,etc.).
1098  */
1099 
1100 /**
1101  * amdgpu_device_wb_fini - Disable Writeback and free memory
1102  *
1103  * @adev: amdgpu_device pointer
1104  *
1105  * Disables Writeback and frees the Writeback memory (all asics).
1106  * Used at driver shutdown.
1107  */
1108 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1109 {
1110 	if (adev->wb.wb_obj) {
1111 		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1112 				      &adev->wb.gpu_addr,
1113 				      (void **)&adev->wb.wb);
1114 		adev->wb.wb_obj = NULL;
1115 	}
1116 }
1117 
1118 /**
1119  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1120  *
1121  * @adev: amdgpu_device pointer
1122  *
1123  * Initializes writeback and allocates writeback memory (all asics).
1124  * Used at driver startup.
1125  * Returns 0 on success or an -error on failure.
1126  */
1127 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1128 {
1129 	int r;
1130 
1131 	if (adev->wb.wb_obj == NULL) {
1132 		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1133 		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1134 					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1135 					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
1136 					    (void **)&adev->wb.wb);
1137 		if (r) {
1138 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1139 			return r;
1140 		}
1141 
1142 		adev->wb.num_wb = AMDGPU_MAX_WB;
1143 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1144 
1145 		/* clear wb memory */
1146 		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1147 	}
1148 
1149 	return 0;
1150 }
1151 
1152 /**
1153  * amdgpu_device_wb_get - Allocate a wb entry
1154  *
1155  * @adev: amdgpu_device pointer
1156  * @wb: wb index
1157  *
1158  * Allocate a wb slot for use by the driver (all asics).
1159  * Returns 0 on success or -EINVAL on failure.
1160  */
1161 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1162 {
1163 	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1164 
1165 	if (offset < adev->wb.num_wb) {
1166 		__set_bit(offset, adev->wb.used);
1167 		*wb = offset << 3; /* convert to dw offset */
1168 		return 0;
1169 	} else {
1170 		return -EINVAL;
1171 	}
1172 }
1173 
1174 /**
1175  * amdgpu_device_wb_free - Free a wb entry
1176  *
1177  * @adev: amdgpu_device pointer
1178  * @wb: wb index
1179  *
1180  * Free a wb slot allocated for use by the driver (all asics)
1181  */
1182 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1183 {
1184 	wb >>= 3;
1185 	if (wb < adev->wb.num_wb)
1186 		__clear_bit(wb, adev->wb.used);
1187 }
1188 
1189 /**
1190  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1191  *
1192  * @adev: amdgpu_device pointer
1193  *
1194  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1195  * to fail, but if any of the BARs is not accessible after the size we abort
1196  * driver loading by returning -ENODEV.
1197  */
1198 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1199 {
1200 	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1201 	struct pci_bus *root;
1202 	struct resource *res;
1203 	unsigned i;
1204 	u16 cmd;
1205 	int r;
1206 
1207 	/* Bypass for VF */
1208 	if (amdgpu_sriov_vf(adev))
1209 		return 0;
1210 
1211 	/* skip if the bios has already enabled large BAR */
1212 	if (adev->gmc.real_vram_size &&
1213 	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1214 		return 0;
1215 
1216 	/* Check if the root BUS has 64bit memory resources */
1217 	root = adev->pdev->bus;
1218 	while (root->parent)
1219 		root = root->parent;
1220 
1221 	pci_bus_for_each_resource(root, res, i) {
1222 		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1223 		    res->start > 0x100000000ull)
1224 			break;
1225 	}
1226 
1227 	/* Trying to resize is pointless without a root hub window above 4GB */
1228 	if (!res)
1229 		return 0;
1230 
1231 	/* Limit the BAR size to what is available */
1232 	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1233 			rbar_size);
1234 
1235 	/* Disable memory decoding while we change the BAR addresses and size */
1236 	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1237 	pci_write_config_word(adev->pdev, PCI_COMMAND,
1238 			      cmd & ~PCI_COMMAND_MEMORY);
1239 
1240 	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
1241 	amdgpu_device_doorbell_fini(adev);
1242 	if (adev->asic_type >= CHIP_BONAIRE)
1243 		pci_release_resource(adev->pdev, 2);
1244 
1245 	pci_release_resource(adev->pdev, 0);
1246 
1247 	r = pci_resize_resource(adev->pdev, 0, rbar_size);
1248 	if (r == -ENOSPC)
1249 		DRM_INFO("Not enough PCI address space for a large BAR.");
1250 	else if (r && r != -ENOTSUPP)
1251 		DRM_ERROR("Problem resizing BAR0 (%d).", r);
1252 
1253 	pci_assign_unassigned_bus_resources(adev->pdev->bus);
1254 
1255 	/* When the doorbell or fb BAR isn't available we have no chance of
1256 	 * using the device.
1257 	 */
1258 	r = amdgpu_device_doorbell_init(adev);
1259 	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1260 		return -ENODEV;
1261 
1262 	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1263 
1264 	return 0;
1265 }
1266 
1267 /*
1268  * GPU helpers function.
1269  */
1270 /**
1271  * amdgpu_device_need_post - check if the hw need post or not
1272  *
1273  * @adev: amdgpu_device pointer
1274  *
1275  * Check if the asic has been initialized (all asics) at driver startup
1276  * or post is needed if  hw reset is performed.
1277  * Returns true if need or false if not.
1278  */
1279 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1280 {
1281 	uint32_t reg;
1282 
1283 	if (amdgpu_sriov_vf(adev))
1284 		return false;
1285 
1286 	if (amdgpu_passthrough(adev)) {
1287 		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
1288 		 * some old smc fw still need driver do vPost otherwise gpu hang, while
1289 		 * those smc fw version above 22.15 doesn't have this flaw, so we force
1290 		 * vpost executed for smc version below 22.15
1291 		 */
1292 		if (adev->asic_type == CHIP_FIJI) {
1293 			int err;
1294 			uint32_t fw_ver;
1295 			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1296 			/* force vPost if error occured */
1297 			if (err)
1298 				return true;
1299 
1300 			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1301 			if (fw_ver < 0x00160e00)
1302 				return true;
1303 		}
1304 	}
1305 
1306 	/* Don't post if we need to reset whole hive on init */
1307 	if (adev->gmc.xgmi.pending_reset)
1308 		return false;
1309 
1310 	if (adev->has_hw_reset) {
1311 		adev->has_hw_reset = false;
1312 		return true;
1313 	}
1314 
1315 	/* bios scratch used on CIK+ */
1316 	if (adev->asic_type >= CHIP_BONAIRE)
1317 		return amdgpu_atombios_scratch_need_asic_init(adev);
1318 
1319 	/* check MEM_SIZE for older asics */
1320 	reg = amdgpu_asic_get_config_memsize(adev);
1321 
1322 	if ((reg != 0) && (reg != 0xffffffff))
1323 		return false;
1324 
1325 	return true;
1326 }
1327 
1328 /**
1329  * amdgpu_device_should_use_aspm - check if the device should program ASPM
1330  *
1331  * @adev: amdgpu_device pointer
1332  *
1333  * Confirm whether the module parameter and pcie bridge agree that ASPM should
1334  * be set for this device.
1335  *
1336  * Returns true if it should be used or false if not.
1337  */
1338 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1339 {
1340 	switch (amdgpu_aspm) {
1341 	case -1:
1342 		break;
1343 	case 0:
1344 		return false;
1345 	case 1:
1346 		return true;
1347 	default:
1348 		return false;
1349 	}
1350 	return pcie_aspm_enabled(adev->pdev);
1351 }
1352 
1353 /* if we get transitioned to only one device, take VGA back */
1354 /**
1355  * amdgpu_device_vga_set_decode - enable/disable vga decode
1356  *
1357  * @pdev: PCI device pointer
1358  * @state: enable/disable vga decode
1359  *
1360  * Enable/disable vga decode (all asics).
1361  * Returns VGA resource flags.
1362  */
1363 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1364 		bool state)
1365 {
1366 	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1367 	amdgpu_asic_set_vga_state(adev, state);
1368 	if (state)
1369 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1370 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1371 	else
1372 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1373 }
1374 
1375 /**
1376  * amdgpu_device_check_block_size - validate the vm block size
1377  *
1378  * @adev: amdgpu_device pointer
1379  *
1380  * Validates the vm block size specified via module parameter.
1381  * The vm block size defines number of bits in page table versus page directory,
1382  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1383  * page table and the remaining bits are in the page directory.
1384  */
1385 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1386 {
1387 	/* defines number of bits in page table versus page directory,
1388 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1389 	 * page table and the remaining bits are in the page directory */
1390 	if (amdgpu_vm_block_size == -1)
1391 		return;
1392 
1393 	if (amdgpu_vm_block_size < 9) {
1394 		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1395 			 amdgpu_vm_block_size);
1396 		amdgpu_vm_block_size = -1;
1397 	}
1398 }
1399 
1400 /**
1401  * amdgpu_device_check_vm_size - validate the vm size
1402  *
1403  * @adev: amdgpu_device pointer
1404  *
1405  * Validates the vm size in GB specified via module parameter.
1406  * The VM size is the size of the GPU virtual memory space in GB.
1407  */
1408 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1409 {
1410 	/* no need to check the default value */
1411 	if (amdgpu_vm_size == -1)
1412 		return;
1413 
1414 	if (amdgpu_vm_size < 1) {
1415 		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1416 			 amdgpu_vm_size);
1417 		amdgpu_vm_size = -1;
1418 	}
1419 }
1420 
1421 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1422 {
1423 	struct sysinfo si;
1424 	bool is_os_64 = (sizeof(void *) == 8);
1425 	uint64_t total_memory;
1426 	uint64_t dram_size_seven_GB = 0x1B8000000;
1427 	uint64_t dram_size_three_GB = 0xB8000000;
1428 
1429 	if (amdgpu_smu_memory_pool_size == 0)
1430 		return;
1431 
1432 	if (!is_os_64) {
1433 		DRM_WARN("Not 64-bit OS, feature not supported\n");
1434 		goto def_value;
1435 	}
1436 	si_meminfo(&si);
1437 	total_memory = (uint64_t)si.totalram * si.mem_unit;
1438 
1439 	if ((amdgpu_smu_memory_pool_size == 1) ||
1440 		(amdgpu_smu_memory_pool_size == 2)) {
1441 		if (total_memory < dram_size_three_GB)
1442 			goto def_value1;
1443 	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1444 		(amdgpu_smu_memory_pool_size == 8)) {
1445 		if (total_memory < dram_size_seven_GB)
1446 			goto def_value1;
1447 	} else {
1448 		DRM_WARN("Smu memory pool size not supported\n");
1449 		goto def_value;
1450 	}
1451 	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1452 
1453 	return;
1454 
1455 def_value1:
1456 	DRM_WARN("No enough system memory\n");
1457 def_value:
1458 	adev->pm.smu_prv_buffer_size = 0;
1459 }
1460 
1461 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1462 {
1463 	if (!(adev->flags & AMD_IS_APU) ||
1464 	    adev->asic_type < CHIP_RAVEN)
1465 		return 0;
1466 
1467 	switch (adev->asic_type) {
1468 	case CHIP_RAVEN:
1469 		if (adev->pdev->device == 0x15dd)
1470 			adev->apu_flags |= AMD_APU_IS_RAVEN;
1471 		if (adev->pdev->device == 0x15d8)
1472 			adev->apu_flags |= AMD_APU_IS_PICASSO;
1473 		break;
1474 	case CHIP_RENOIR:
1475 		if ((adev->pdev->device == 0x1636) ||
1476 		    (adev->pdev->device == 0x164c))
1477 			adev->apu_flags |= AMD_APU_IS_RENOIR;
1478 		else
1479 			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1480 		break;
1481 	case CHIP_VANGOGH:
1482 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
1483 		break;
1484 	case CHIP_YELLOW_CARP:
1485 		break;
1486 	case CHIP_CYAN_SKILLFISH:
1487 		if ((adev->pdev->device == 0x13FE) ||
1488 		    (adev->pdev->device == 0x143F))
1489 			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1490 		break;
1491 	default:
1492 		break;
1493 	}
1494 
1495 	return 0;
1496 }
1497 
1498 /**
1499  * amdgpu_device_check_arguments - validate module params
1500  *
1501  * @adev: amdgpu_device pointer
1502  *
1503  * Validates certain module parameters and updates
1504  * the associated values used by the driver (all asics).
1505  */
1506 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1507 {
1508 	if (amdgpu_sched_jobs < 4) {
1509 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1510 			 amdgpu_sched_jobs);
1511 		amdgpu_sched_jobs = 4;
1512 	} else if (!is_power_of_2(amdgpu_sched_jobs)){
1513 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1514 			 amdgpu_sched_jobs);
1515 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1516 	}
1517 
1518 	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1519 		/* gart size must be greater or equal to 32M */
1520 		dev_warn(adev->dev, "gart size (%d) too small\n",
1521 			 amdgpu_gart_size);
1522 		amdgpu_gart_size = -1;
1523 	}
1524 
1525 	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1526 		/* gtt size must be greater or equal to 32M */
1527 		dev_warn(adev->dev, "gtt size (%d) too small\n",
1528 				 amdgpu_gtt_size);
1529 		amdgpu_gtt_size = -1;
1530 	}
1531 
1532 	/* valid range is between 4 and 9 inclusive */
1533 	if (amdgpu_vm_fragment_size != -1 &&
1534 	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1535 		dev_warn(adev->dev, "valid range is between 4 and 9\n");
1536 		amdgpu_vm_fragment_size = -1;
1537 	}
1538 
1539 	if (amdgpu_sched_hw_submission < 2) {
1540 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1541 			 amdgpu_sched_hw_submission);
1542 		amdgpu_sched_hw_submission = 2;
1543 	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1544 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1545 			 amdgpu_sched_hw_submission);
1546 		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1547 	}
1548 
1549 	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1550 		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1551 		amdgpu_reset_method = -1;
1552 	}
1553 
1554 	amdgpu_device_check_smu_prv_buffer_size(adev);
1555 
1556 	amdgpu_device_check_vm_size(adev);
1557 
1558 	amdgpu_device_check_block_size(adev);
1559 
1560 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1561 
1562 	return 0;
1563 }
1564 
1565 /**
1566  * amdgpu_switcheroo_set_state - set switcheroo state
1567  *
1568  * @pdev: pci dev pointer
1569  * @state: vga_switcheroo state
1570  *
1571  * Callback for the switcheroo driver.  Suspends or resumes the
1572  * the asics before or after it is powered up using ACPI methods.
1573  */
1574 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1575 					enum vga_switcheroo_state state)
1576 {
1577 	struct drm_device *dev = pci_get_drvdata(pdev);
1578 	int r;
1579 
1580 	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1581 		return;
1582 
1583 	if (state == VGA_SWITCHEROO_ON) {
1584 		pr_info("switched on\n");
1585 		/* don't suspend or resume card normally */
1586 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1587 
1588 		pci_set_power_state(pdev, PCI_D0);
1589 		amdgpu_device_load_pci_state(pdev);
1590 		r = pci_enable_device(pdev);
1591 		if (r)
1592 			DRM_WARN("pci_enable_device failed (%d)\n", r);
1593 		amdgpu_device_resume(dev, true);
1594 
1595 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1596 	} else {
1597 		pr_info("switched off\n");
1598 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1599 		amdgpu_device_suspend(dev, true);
1600 		amdgpu_device_cache_pci_state(pdev);
1601 		/* Shut down the device */
1602 		pci_disable_device(pdev);
1603 		pci_set_power_state(pdev, PCI_D3cold);
1604 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1605 	}
1606 }
1607 
1608 /**
1609  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1610  *
1611  * @pdev: pci dev pointer
1612  *
1613  * Callback for the switcheroo driver.  Check of the switcheroo
1614  * state can be changed.
1615  * Returns true if the state can be changed, false if not.
1616  */
1617 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1618 {
1619 	struct drm_device *dev = pci_get_drvdata(pdev);
1620 
1621 	/*
1622 	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1623 	* locking inversion with the driver load path. And the access here is
1624 	* completely racy anyway. So don't bother with locking for now.
1625 	*/
1626 	return atomic_read(&dev->open_count) == 0;
1627 }
1628 
1629 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1630 	.set_gpu_state = amdgpu_switcheroo_set_state,
1631 	.reprobe = NULL,
1632 	.can_switch = amdgpu_switcheroo_can_switch,
1633 };
1634 
1635 /**
1636  * amdgpu_device_ip_set_clockgating_state - set the CG state
1637  *
1638  * @dev: amdgpu_device pointer
1639  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1640  * @state: clockgating state (gate or ungate)
1641  *
1642  * Sets the requested clockgating state for all instances of
1643  * the hardware IP specified.
1644  * Returns the error code from the last instance.
1645  */
1646 int amdgpu_device_ip_set_clockgating_state(void *dev,
1647 					   enum amd_ip_block_type block_type,
1648 					   enum amd_clockgating_state state)
1649 {
1650 	struct amdgpu_device *adev = dev;
1651 	int i, r = 0;
1652 
1653 	for (i = 0; i < adev->num_ip_blocks; i++) {
1654 		if (!adev->ip_blocks[i].status.valid)
1655 			continue;
1656 		if (adev->ip_blocks[i].version->type != block_type)
1657 			continue;
1658 		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1659 			continue;
1660 		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1661 			(void *)adev, state);
1662 		if (r)
1663 			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1664 				  adev->ip_blocks[i].version->funcs->name, r);
1665 	}
1666 	return r;
1667 }
1668 
1669 /**
1670  * amdgpu_device_ip_set_powergating_state - set the PG state
1671  *
1672  * @dev: amdgpu_device pointer
1673  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1674  * @state: powergating state (gate or ungate)
1675  *
1676  * Sets the requested powergating state for all instances of
1677  * the hardware IP specified.
1678  * Returns the error code from the last instance.
1679  */
1680 int amdgpu_device_ip_set_powergating_state(void *dev,
1681 					   enum amd_ip_block_type block_type,
1682 					   enum amd_powergating_state state)
1683 {
1684 	struct amdgpu_device *adev = dev;
1685 	int i, r = 0;
1686 
1687 	for (i = 0; i < adev->num_ip_blocks; i++) {
1688 		if (!adev->ip_blocks[i].status.valid)
1689 			continue;
1690 		if (adev->ip_blocks[i].version->type != block_type)
1691 			continue;
1692 		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1693 			continue;
1694 		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1695 			(void *)adev, state);
1696 		if (r)
1697 			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1698 				  adev->ip_blocks[i].version->funcs->name, r);
1699 	}
1700 	return r;
1701 }
1702 
1703 /**
1704  * amdgpu_device_ip_get_clockgating_state - get the CG state
1705  *
1706  * @adev: amdgpu_device pointer
1707  * @flags: clockgating feature flags
1708  *
1709  * Walks the list of IPs on the device and updates the clockgating
1710  * flags for each IP.
1711  * Updates @flags with the feature flags for each hardware IP where
1712  * clockgating is enabled.
1713  */
1714 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1715 					    u64 *flags)
1716 {
1717 	int i;
1718 
1719 	for (i = 0; i < adev->num_ip_blocks; i++) {
1720 		if (!adev->ip_blocks[i].status.valid)
1721 			continue;
1722 		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1723 			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1724 	}
1725 }
1726 
1727 /**
1728  * amdgpu_device_ip_wait_for_idle - wait for idle
1729  *
1730  * @adev: amdgpu_device pointer
1731  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1732  *
1733  * Waits for the request hardware IP to be idle.
1734  * Returns 0 for success or a negative error code on failure.
1735  */
1736 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1737 				   enum amd_ip_block_type block_type)
1738 {
1739 	int i, r;
1740 
1741 	for (i = 0; i < adev->num_ip_blocks; i++) {
1742 		if (!adev->ip_blocks[i].status.valid)
1743 			continue;
1744 		if (adev->ip_blocks[i].version->type == block_type) {
1745 			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1746 			if (r)
1747 				return r;
1748 			break;
1749 		}
1750 	}
1751 	return 0;
1752 
1753 }
1754 
1755 /**
1756  * amdgpu_device_ip_is_idle - is the hardware IP idle
1757  *
1758  * @adev: amdgpu_device pointer
1759  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1760  *
1761  * Check if the hardware IP is idle or not.
1762  * Returns true if it the IP is idle, false if not.
1763  */
1764 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1765 			      enum amd_ip_block_type block_type)
1766 {
1767 	int i;
1768 
1769 	for (i = 0; i < adev->num_ip_blocks; i++) {
1770 		if (!adev->ip_blocks[i].status.valid)
1771 			continue;
1772 		if (adev->ip_blocks[i].version->type == block_type)
1773 			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1774 	}
1775 	return true;
1776 
1777 }
1778 
1779 /**
1780  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1781  *
1782  * @adev: amdgpu_device pointer
1783  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1784  *
1785  * Returns a pointer to the hardware IP block structure
1786  * if it exists for the asic, otherwise NULL.
1787  */
1788 struct amdgpu_ip_block *
1789 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1790 			      enum amd_ip_block_type type)
1791 {
1792 	int i;
1793 
1794 	for (i = 0; i < adev->num_ip_blocks; i++)
1795 		if (adev->ip_blocks[i].version->type == type)
1796 			return &adev->ip_blocks[i];
1797 
1798 	return NULL;
1799 }
1800 
1801 /**
1802  * amdgpu_device_ip_block_version_cmp
1803  *
1804  * @adev: amdgpu_device pointer
1805  * @type: enum amd_ip_block_type
1806  * @major: major version
1807  * @minor: minor version
1808  *
1809  * return 0 if equal or greater
1810  * return 1 if smaller or the ip_block doesn't exist
1811  */
1812 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1813 				       enum amd_ip_block_type type,
1814 				       u32 major, u32 minor)
1815 {
1816 	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1817 
1818 	if (ip_block && ((ip_block->version->major > major) ||
1819 			((ip_block->version->major == major) &&
1820 			(ip_block->version->minor >= minor))))
1821 		return 0;
1822 
1823 	return 1;
1824 }
1825 
1826 /**
1827  * amdgpu_device_ip_block_add
1828  *
1829  * @adev: amdgpu_device pointer
1830  * @ip_block_version: pointer to the IP to add
1831  *
1832  * Adds the IP block driver information to the collection of IPs
1833  * on the asic.
1834  */
1835 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1836 			       const struct amdgpu_ip_block_version *ip_block_version)
1837 {
1838 	if (!ip_block_version)
1839 		return -EINVAL;
1840 
1841 	switch (ip_block_version->type) {
1842 	case AMD_IP_BLOCK_TYPE_VCN:
1843 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1844 			return 0;
1845 		break;
1846 	case AMD_IP_BLOCK_TYPE_JPEG:
1847 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1848 			return 0;
1849 		break;
1850 	default:
1851 		break;
1852 	}
1853 
1854 	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1855 		  ip_block_version->funcs->name);
1856 
1857 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1858 
1859 	return 0;
1860 }
1861 
1862 /**
1863  * amdgpu_device_enable_virtual_display - enable virtual display feature
1864  *
1865  * @adev: amdgpu_device pointer
1866  *
1867  * Enabled the virtual display feature if the user has enabled it via
1868  * the module parameter virtual_display.  This feature provides a virtual
1869  * display hardware on headless boards or in virtualized environments.
1870  * This function parses and validates the configuration string specified by
1871  * the user and configues the virtual display configuration (number of
1872  * virtual connectors, crtcs, etc.) specified.
1873  */
1874 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1875 {
1876 	adev->enable_virtual_display = false;
1877 
1878 	if (amdgpu_virtual_display) {
1879 		const char *pci_address_name = pci_name(adev->pdev);
1880 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1881 
1882 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1883 		pciaddstr_tmp = pciaddstr;
1884 		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1885 			pciaddname = strsep(&pciaddname_tmp, ",");
1886 			if (!strcmp("all", pciaddname)
1887 			    || !strcmp(pci_address_name, pciaddname)) {
1888 				long num_crtc;
1889 				int res = -1;
1890 
1891 				adev->enable_virtual_display = true;
1892 
1893 				if (pciaddname_tmp)
1894 					res = kstrtol(pciaddname_tmp, 10,
1895 						      &num_crtc);
1896 
1897 				if (!res) {
1898 					if (num_crtc < 1)
1899 						num_crtc = 1;
1900 					if (num_crtc > 6)
1901 						num_crtc = 6;
1902 					adev->mode_info.num_crtc = num_crtc;
1903 				} else {
1904 					adev->mode_info.num_crtc = 1;
1905 				}
1906 				break;
1907 			}
1908 		}
1909 
1910 		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1911 			 amdgpu_virtual_display, pci_address_name,
1912 			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1913 
1914 		kfree(pciaddstr);
1915 	}
1916 }
1917 
1918 /**
1919  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1920  *
1921  * @adev: amdgpu_device pointer
1922  *
1923  * Parses the asic configuration parameters specified in the gpu info
1924  * firmware and makes them availale to the driver for use in configuring
1925  * the asic.
1926  * Returns 0 on success, -EINVAL on failure.
1927  */
1928 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1929 {
1930 	const char *chip_name;
1931 	char fw_name[40];
1932 	int err;
1933 	const struct gpu_info_firmware_header_v1_0 *hdr;
1934 
1935 	adev->firmware.gpu_info_fw = NULL;
1936 
1937 	if (adev->mman.discovery_bin) {
1938 		/*
1939 		 * FIXME: The bounding box is still needed by Navi12, so
1940 		 * temporarily read it from gpu_info firmware. Should be dropped
1941 		 * when DAL no longer needs it.
1942 		 */
1943 		if (adev->asic_type != CHIP_NAVI12)
1944 			return 0;
1945 	}
1946 
1947 	switch (adev->asic_type) {
1948 	default:
1949 		return 0;
1950 	case CHIP_VEGA10:
1951 		chip_name = "vega10";
1952 		break;
1953 	case CHIP_VEGA12:
1954 		chip_name = "vega12";
1955 		break;
1956 	case CHIP_RAVEN:
1957 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1958 			chip_name = "raven2";
1959 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1960 			chip_name = "picasso";
1961 		else
1962 			chip_name = "raven";
1963 		break;
1964 	case CHIP_ARCTURUS:
1965 		chip_name = "arcturus";
1966 		break;
1967 	case CHIP_NAVI12:
1968 		chip_name = "navi12";
1969 		break;
1970 	}
1971 
1972 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1973 	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1974 	if (err) {
1975 		dev_err(adev->dev,
1976 			"Failed to load gpu_info firmware \"%s\"\n",
1977 			fw_name);
1978 		goto out;
1979 	}
1980 	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1981 	if (err) {
1982 		dev_err(adev->dev,
1983 			"Failed to validate gpu_info firmware \"%s\"\n",
1984 			fw_name);
1985 		goto out;
1986 	}
1987 
1988 	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1989 	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1990 
1991 	switch (hdr->version_major) {
1992 	case 1:
1993 	{
1994 		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1995 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1996 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1997 
1998 		/*
1999 		 * Should be droped when DAL no longer needs it.
2000 		 */
2001 		if (adev->asic_type == CHIP_NAVI12)
2002 			goto parse_soc_bounding_box;
2003 
2004 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2005 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2006 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2007 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2008 		adev->gfx.config.max_texture_channel_caches =
2009 			le32_to_cpu(gpu_info_fw->gc_num_tccs);
2010 		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2011 		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2012 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2013 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2014 		adev->gfx.config.double_offchip_lds_buf =
2015 			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2016 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2017 		adev->gfx.cu_info.max_waves_per_simd =
2018 			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2019 		adev->gfx.cu_info.max_scratch_slots_per_cu =
2020 			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2021 		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2022 		if (hdr->version_minor >= 1) {
2023 			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2024 				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2025 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2026 			adev->gfx.config.num_sc_per_sh =
2027 				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2028 			adev->gfx.config.num_packer_per_sc =
2029 				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2030 		}
2031 
2032 parse_soc_bounding_box:
2033 		/*
2034 		 * soc bounding box info is not integrated in disocovery table,
2035 		 * we always need to parse it from gpu info firmware if needed.
2036 		 */
2037 		if (hdr->version_minor == 2) {
2038 			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2039 				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2040 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2041 			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2042 		}
2043 		break;
2044 	}
2045 	default:
2046 		dev_err(adev->dev,
2047 			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2048 		err = -EINVAL;
2049 		goto out;
2050 	}
2051 out:
2052 	return err;
2053 }
2054 
2055 /**
2056  * amdgpu_device_ip_early_init - run early init for hardware IPs
2057  *
2058  * @adev: amdgpu_device pointer
2059  *
2060  * Early initialization pass for hardware IPs.  The hardware IPs that make
2061  * up each asic are discovered each IP's early_init callback is run.  This
2062  * is the first stage in initializing the asic.
2063  * Returns 0 on success, negative error code on failure.
2064  */
2065 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2066 {
2067 	struct drm_device *dev = adev_to_drm(adev);
2068 	struct pci_dev *parent;
2069 	int i, r;
2070 
2071 	amdgpu_device_enable_virtual_display(adev);
2072 
2073 	if (amdgpu_sriov_vf(adev)) {
2074 		r = amdgpu_virt_request_full_gpu(adev, true);
2075 		if (r)
2076 			return r;
2077 	}
2078 
2079 	switch (adev->asic_type) {
2080 #ifdef CONFIG_DRM_AMDGPU_SI
2081 	case CHIP_VERDE:
2082 	case CHIP_TAHITI:
2083 	case CHIP_PITCAIRN:
2084 	case CHIP_OLAND:
2085 	case CHIP_HAINAN:
2086 		adev->family = AMDGPU_FAMILY_SI;
2087 		r = si_set_ip_blocks(adev);
2088 		if (r)
2089 			return r;
2090 		break;
2091 #endif
2092 #ifdef CONFIG_DRM_AMDGPU_CIK
2093 	case CHIP_BONAIRE:
2094 	case CHIP_HAWAII:
2095 	case CHIP_KAVERI:
2096 	case CHIP_KABINI:
2097 	case CHIP_MULLINS:
2098 		if (adev->flags & AMD_IS_APU)
2099 			adev->family = AMDGPU_FAMILY_KV;
2100 		else
2101 			adev->family = AMDGPU_FAMILY_CI;
2102 
2103 		r = cik_set_ip_blocks(adev);
2104 		if (r)
2105 			return r;
2106 		break;
2107 #endif
2108 	case CHIP_TOPAZ:
2109 	case CHIP_TONGA:
2110 	case CHIP_FIJI:
2111 	case CHIP_POLARIS10:
2112 	case CHIP_POLARIS11:
2113 	case CHIP_POLARIS12:
2114 	case CHIP_VEGAM:
2115 	case CHIP_CARRIZO:
2116 	case CHIP_STONEY:
2117 		if (adev->flags & AMD_IS_APU)
2118 			adev->family = AMDGPU_FAMILY_CZ;
2119 		else
2120 			adev->family = AMDGPU_FAMILY_VI;
2121 
2122 		r = vi_set_ip_blocks(adev);
2123 		if (r)
2124 			return r;
2125 		break;
2126 	default:
2127 		r = amdgpu_discovery_set_ip_blocks(adev);
2128 		if (r)
2129 			return r;
2130 		break;
2131 	}
2132 
2133 	if (amdgpu_has_atpx() &&
2134 	    (amdgpu_is_atpx_hybrid() ||
2135 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
2136 	    ((adev->flags & AMD_IS_APU) == 0) &&
2137 	    !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2138 		adev->flags |= AMD_IS_PX;
2139 
2140 	if (!(adev->flags & AMD_IS_APU)) {
2141 		parent = pci_upstream_bridge(adev->pdev);
2142 		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2143 	}
2144 
2145 	amdgpu_amdkfd_device_probe(adev);
2146 
2147 	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2148 	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2149 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2150 	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2151 		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2152 
2153 	for (i = 0; i < adev->num_ip_blocks; i++) {
2154 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2155 			DRM_ERROR("disabled ip block: %d <%s>\n",
2156 				  i, adev->ip_blocks[i].version->funcs->name);
2157 			adev->ip_blocks[i].status.valid = false;
2158 		} else {
2159 			if (adev->ip_blocks[i].version->funcs->early_init) {
2160 				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2161 				if (r == -ENOENT) {
2162 					adev->ip_blocks[i].status.valid = false;
2163 				} else if (r) {
2164 					DRM_ERROR("early_init of IP block <%s> failed %d\n",
2165 						  adev->ip_blocks[i].version->funcs->name, r);
2166 					return r;
2167 				} else {
2168 					adev->ip_blocks[i].status.valid = true;
2169 				}
2170 			} else {
2171 				adev->ip_blocks[i].status.valid = true;
2172 			}
2173 		}
2174 		/* get the vbios after the asic_funcs are set up */
2175 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2176 			r = amdgpu_device_parse_gpu_info_fw(adev);
2177 			if (r)
2178 				return r;
2179 
2180 			/* Read BIOS */
2181 			if (!amdgpu_get_bios(adev))
2182 				return -EINVAL;
2183 
2184 			r = amdgpu_atombios_init(adev);
2185 			if (r) {
2186 				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2187 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2188 				return r;
2189 			}
2190 
2191 			/*get pf2vf msg info at it's earliest time*/
2192 			if (amdgpu_sriov_vf(adev))
2193 				amdgpu_virt_init_data_exchange(adev);
2194 
2195 		}
2196 	}
2197 
2198 	adev->cg_flags &= amdgpu_cg_mask;
2199 	adev->pg_flags &= amdgpu_pg_mask;
2200 
2201 	return 0;
2202 }
2203 
2204 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2205 {
2206 	int i, r;
2207 
2208 	for (i = 0; i < adev->num_ip_blocks; i++) {
2209 		if (!adev->ip_blocks[i].status.sw)
2210 			continue;
2211 		if (adev->ip_blocks[i].status.hw)
2212 			continue;
2213 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2214 		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2215 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2216 			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2217 			if (r) {
2218 				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2219 					  adev->ip_blocks[i].version->funcs->name, r);
2220 				return r;
2221 			}
2222 			adev->ip_blocks[i].status.hw = true;
2223 		}
2224 	}
2225 
2226 	return 0;
2227 }
2228 
2229 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2230 {
2231 	int i, r;
2232 
2233 	for (i = 0; i < adev->num_ip_blocks; i++) {
2234 		if (!adev->ip_blocks[i].status.sw)
2235 			continue;
2236 		if (adev->ip_blocks[i].status.hw)
2237 			continue;
2238 		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2239 		if (r) {
2240 			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2241 				  adev->ip_blocks[i].version->funcs->name, r);
2242 			return r;
2243 		}
2244 		adev->ip_blocks[i].status.hw = true;
2245 	}
2246 
2247 	return 0;
2248 }
2249 
2250 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2251 {
2252 	int r = 0;
2253 	int i;
2254 	uint32_t smu_version;
2255 
2256 	if (adev->asic_type >= CHIP_VEGA10) {
2257 		for (i = 0; i < adev->num_ip_blocks; i++) {
2258 			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2259 				continue;
2260 
2261 			if (!adev->ip_blocks[i].status.sw)
2262 				continue;
2263 
2264 			/* no need to do the fw loading again if already done*/
2265 			if (adev->ip_blocks[i].status.hw == true)
2266 				break;
2267 
2268 			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2269 				r = adev->ip_blocks[i].version->funcs->resume(adev);
2270 				if (r) {
2271 					DRM_ERROR("resume of IP block <%s> failed %d\n",
2272 							  adev->ip_blocks[i].version->funcs->name, r);
2273 					return r;
2274 				}
2275 			} else {
2276 				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2277 				if (r) {
2278 					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2279 							  adev->ip_blocks[i].version->funcs->name, r);
2280 					return r;
2281 				}
2282 			}
2283 
2284 			adev->ip_blocks[i].status.hw = true;
2285 			break;
2286 		}
2287 	}
2288 
2289 	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2290 		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2291 
2292 	return r;
2293 }
2294 
2295 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2296 {
2297 	long timeout;
2298 	int r, i;
2299 
2300 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2301 		struct amdgpu_ring *ring = adev->rings[i];
2302 
2303 		/* No need to setup the GPU scheduler for rings that don't need it */
2304 		if (!ring || ring->no_scheduler)
2305 			continue;
2306 
2307 		switch (ring->funcs->type) {
2308 		case AMDGPU_RING_TYPE_GFX:
2309 			timeout = adev->gfx_timeout;
2310 			break;
2311 		case AMDGPU_RING_TYPE_COMPUTE:
2312 			timeout = adev->compute_timeout;
2313 			break;
2314 		case AMDGPU_RING_TYPE_SDMA:
2315 			timeout = adev->sdma_timeout;
2316 			break;
2317 		default:
2318 			timeout = adev->video_timeout;
2319 			break;
2320 		}
2321 
2322 		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2323 				   ring->num_hw_submission, amdgpu_job_hang_limit,
2324 				   timeout, adev->reset_domain->wq,
2325 				   ring->sched_score, ring->name,
2326 				   adev->dev);
2327 		if (r) {
2328 			DRM_ERROR("Failed to create scheduler on ring %s.\n",
2329 				  ring->name);
2330 			return r;
2331 		}
2332 	}
2333 
2334 	return 0;
2335 }
2336 
2337 
2338 /**
2339  * amdgpu_device_ip_init - run init for hardware IPs
2340  *
2341  * @adev: amdgpu_device pointer
2342  *
2343  * Main initialization pass for hardware IPs.  The list of all the hardware
2344  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2345  * are run.  sw_init initializes the software state associated with each IP
2346  * and hw_init initializes the hardware associated with each IP.
2347  * Returns 0 on success, negative error code on failure.
2348  */
2349 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2350 {
2351 	int i, r;
2352 
2353 	r = amdgpu_ras_init(adev);
2354 	if (r)
2355 		return r;
2356 
2357 	for (i = 0; i < adev->num_ip_blocks; i++) {
2358 		if (!adev->ip_blocks[i].status.valid)
2359 			continue;
2360 		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2361 		if (r) {
2362 			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2363 				  adev->ip_blocks[i].version->funcs->name, r);
2364 			goto init_failed;
2365 		}
2366 		adev->ip_blocks[i].status.sw = true;
2367 
2368 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2369 			/* need to do common hw init early so everything is set up for gmc */
2370 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2371 			if (r) {
2372 				DRM_ERROR("hw_init %d failed %d\n", i, r);
2373 				goto init_failed;
2374 			}
2375 			adev->ip_blocks[i].status.hw = true;
2376 		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2377 			/* need to do gmc hw init early so we can allocate gpu mem */
2378 			/* Try to reserve bad pages early */
2379 			if (amdgpu_sriov_vf(adev))
2380 				amdgpu_virt_exchange_data(adev);
2381 
2382 			r = amdgpu_device_vram_scratch_init(adev);
2383 			if (r) {
2384 				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2385 				goto init_failed;
2386 			}
2387 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2388 			if (r) {
2389 				DRM_ERROR("hw_init %d failed %d\n", i, r);
2390 				goto init_failed;
2391 			}
2392 			r = amdgpu_device_wb_init(adev);
2393 			if (r) {
2394 				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2395 				goto init_failed;
2396 			}
2397 			adev->ip_blocks[i].status.hw = true;
2398 
2399 			/* right after GMC hw init, we create CSA */
2400 			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2401 				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2402 								AMDGPU_GEM_DOMAIN_VRAM,
2403 								AMDGPU_CSA_SIZE);
2404 				if (r) {
2405 					DRM_ERROR("allocate CSA failed %d\n", r);
2406 					goto init_failed;
2407 				}
2408 			}
2409 		}
2410 	}
2411 
2412 	if (amdgpu_sriov_vf(adev))
2413 		amdgpu_virt_init_data_exchange(adev);
2414 
2415 	r = amdgpu_ib_pool_init(adev);
2416 	if (r) {
2417 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2418 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2419 		goto init_failed;
2420 	}
2421 
2422 	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2423 	if (r)
2424 		goto init_failed;
2425 
2426 	r = amdgpu_device_ip_hw_init_phase1(adev);
2427 	if (r)
2428 		goto init_failed;
2429 
2430 	r = amdgpu_device_fw_loading(adev);
2431 	if (r)
2432 		goto init_failed;
2433 
2434 	r = amdgpu_device_ip_hw_init_phase2(adev);
2435 	if (r)
2436 		goto init_failed;
2437 
2438 	/*
2439 	 * retired pages will be loaded from eeprom and reserved here,
2440 	 * it should be called after amdgpu_device_ip_hw_init_phase2  since
2441 	 * for some ASICs the RAS EEPROM code relies on SMU fully functioning
2442 	 * for I2C communication which only true at this point.
2443 	 *
2444 	 * amdgpu_ras_recovery_init may fail, but the upper only cares the
2445 	 * failure from bad gpu situation and stop amdgpu init process
2446 	 * accordingly. For other failed cases, it will still release all
2447 	 * the resource and print error message, rather than returning one
2448 	 * negative value to upper level.
2449 	 *
2450 	 * Note: theoretically, this should be called before all vram allocations
2451 	 * to protect retired page from abusing
2452 	 */
2453 	r = amdgpu_ras_recovery_init(adev);
2454 	if (r)
2455 		goto init_failed;
2456 
2457 	/**
2458 	 * In case of XGMI grab extra reference for reset domain for this device
2459 	 */
2460 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2461 		if (amdgpu_xgmi_add_device(adev) == 0) {
2462 			if (!amdgpu_sriov_vf(adev)) {
2463 				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2464 
2465 				if (!hive->reset_domain ||
2466 				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2467 					r = -ENOENT;
2468 					amdgpu_put_xgmi_hive(hive);
2469 					goto init_failed;
2470 				}
2471 
2472 				/* Drop the early temporary reset domain we created for device */
2473 				amdgpu_reset_put_reset_domain(adev->reset_domain);
2474 				adev->reset_domain = hive->reset_domain;
2475 				amdgpu_put_xgmi_hive(hive);
2476 			}
2477 		}
2478 	}
2479 
2480 	r = amdgpu_device_init_schedulers(adev);
2481 	if (r)
2482 		goto init_failed;
2483 
2484 	/* Don't init kfd if whole hive need to be reset during init */
2485 	if (!adev->gmc.xgmi.pending_reset)
2486 		amdgpu_amdkfd_device_init(adev);
2487 
2488 	amdgpu_fru_get_product_info(adev);
2489 
2490 init_failed:
2491 	if (amdgpu_sriov_vf(adev))
2492 		amdgpu_virt_release_full_gpu(adev, true);
2493 
2494 	return r;
2495 }
2496 
2497 /**
2498  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2499  *
2500  * @adev: amdgpu_device pointer
2501  *
2502  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2503  * this function before a GPU reset.  If the value is retained after a
2504  * GPU reset, VRAM has not been lost.  Some GPU resets may destry VRAM contents.
2505  */
2506 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2507 {
2508 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2509 }
2510 
2511 /**
2512  * amdgpu_device_check_vram_lost - check if vram is valid
2513  *
2514  * @adev: amdgpu_device pointer
2515  *
2516  * Checks the reset magic value written to the gart pointer in VRAM.
2517  * The driver calls this after a GPU reset to see if the contents of
2518  * VRAM is lost or now.
2519  * returns true if vram is lost, false if not.
2520  */
2521 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2522 {
2523 	if (memcmp(adev->gart.ptr, adev->reset_magic,
2524 			AMDGPU_RESET_MAGIC_NUM))
2525 		return true;
2526 
2527 	if (!amdgpu_in_reset(adev))
2528 		return false;
2529 
2530 	/*
2531 	 * For all ASICs with baco/mode1 reset, the VRAM is
2532 	 * always assumed to be lost.
2533 	 */
2534 	switch (amdgpu_asic_reset_method(adev)) {
2535 	case AMD_RESET_METHOD_BACO:
2536 	case AMD_RESET_METHOD_MODE1:
2537 		return true;
2538 	default:
2539 		return false;
2540 	}
2541 }
2542 
2543 /**
2544  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2545  *
2546  * @adev: amdgpu_device pointer
2547  * @state: clockgating state (gate or ungate)
2548  *
2549  * The list of all the hardware IPs that make up the asic is walked and the
2550  * set_clockgating_state callbacks are run.
2551  * Late initialization pass enabling clockgating for hardware IPs.
2552  * Fini or suspend, pass disabling clockgating for hardware IPs.
2553  * Returns 0 on success, negative error code on failure.
2554  */
2555 
2556 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2557 			       enum amd_clockgating_state state)
2558 {
2559 	int i, j, r;
2560 
2561 	if (amdgpu_emu_mode == 1)
2562 		return 0;
2563 
2564 	for (j = 0; j < adev->num_ip_blocks; j++) {
2565 		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2566 		if (!adev->ip_blocks[i].status.late_initialized)
2567 			continue;
2568 		/* skip CG for GFX on S0ix */
2569 		if (adev->in_s0ix &&
2570 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2571 			continue;
2572 		/* skip CG for VCE/UVD, it's handled specially */
2573 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2574 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2575 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2576 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2577 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2578 			/* enable clockgating to save power */
2579 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2580 										     state);
2581 			if (r) {
2582 				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2583 					  adev->ip_blocks[i].version->funcs->name, r);
2584 				return r;
2585 			}
2586 		}
2587 	}
2588 
2589 	return 0;
2590 }
2591 
2592 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2593 			       enum amd_powergating_state state)
2594 {
2595 	int i, j, r;
2596 
2597 	if (amdgpu_emu_mode == 1)
2598 		return 0;
2599 
2600 	for (j = 0; j < adev->num_ip_blocks; j++) {
2601 		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2602 		if (!adev->ip_blocks[i].status.late_initialized)
2603 			continue;
2604 		/* skip PG for GFX on S0ix */
2605 		if (adev->in_s0ix &&
2606 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2607 			continue;
2608 		/* skip CG for VCE/UVD, it's handled specially */
2609 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2610 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2611 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2612 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2613 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2614 			/* enable powergating to save power */
2615 			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2616 											state);
2617 			if (r) {
2618 				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2619 					  adev->ip_blocks[i].version->funcs->name, r);
2620 				return r;
2621 			}
2622 		}
2623 	}
2624 	return 0;
2625 }
2626 
2627 static int amdgpu_device_enable_mgpu_fan_boost(void)
2628 {
2629 	struct amdgpu_gpu_instance *gpu_ins;
2630 	struct amdgpu_device *adev;
2631 	int i, ret = 0;
2632 
2633 	mutex_lock(&mgpu_info.mutex);
2634 
2635 	/*
2636 	 * MGPU fan boost feature should be enabled
2637 	 * only when there are two or more dGPUs in
2638 	 * the system
2639 	 */
2640 	if (mgpu_info.num_dgpu < 2)
2641 		goto out;
2642 
2643 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2644 		gpu_ins = &(mgpu_info.gpu_ins[i]);
2645 		adev = gpu_ins->adev;
2646 		if (!(adev->flags & AMD_IS_APU) &&
2647 		    !gpu_ins->mgpu_fan_enabled) {
2648 			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2649 			if (ret)
2650 				break;
2651 
2652 			gpu_ins->mgpu_fan_enabled = 1;
2653 		}
2654 	}
2655 
2656 out:
2657 	mutex_unlock(&mgpu_info.mutex);
2658 
2659 	return ret;
2660 }
2661 
2662 /**
2663  * amdgpu_device_ip_late_init - run late init for hardware IPs
2664  *
2665  * @adev: amdgpu_device pointer
2666  *
2667  * Late initialization pass for hardware IPs.  The list of all the hardware
2668  * IPs that make up the asic is walked and the late_init callbacks are run.
2669  * late_init covers any special initialization that an IP requires
2670  * after all of the have been initialized or something that needs to happen
2671  * late in the init process.
2672  * Returns 0 on success, negative error code on failure.
2673  */
2674 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2675 {
2676 	struct amdgpu_gpu_instance *gpu_instance;
2677 	int i = 0, r;
2678 
2679 	for (i = 0; i < adev->num_ip_blocks; i++) {
2680 		if (!adev->ip_blocks[i].status.hw)
2681 			continue;
2682 		if (adev->ip_blocks[i].version->funcs->late_init) {
2683 			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2684 			if (r) {
2685 				DRM_ERROR("late_init of IP block <%s> failed %d\n",
2686 					  adev->ip_blocks[i].version->funcs->name, r);
2687 				return r;
2688 			}
2689 		}
2690 		adev->ip_blocks[i].status.late_initialized = true;
2691 	}
2692 
2693 	r = amdgpu_ras_late_init(adev);
2694 	if (r) {
2695 		DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2696 		return r;
2697 	}
2698 
2699 	amdgpu_ras_set_error_query_ready(adev, true);
2700 
2701 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2702 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2703 
2704 	amdgpu_device_fill_reset_magic(adev);
2705 
2706 	r = amdgpu_device_enable_mgpu_fan_boost();
2707 	if (r)
2708 		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2709 
2710 	/* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
2711 	if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1)||
2712 			       adev->asic_type == CHIP_ALDEBARAN ))
2713 		amdgpu_dpm_handle_passthrough_sbr(adev, true);
2714 
2715 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2716 		mutex_lock(&mgpu_info.mutex);
2717 
2718 		/*
2719 		 * Reset device p-state to low as this was booted with high.
2720 		 *
2721 		 * This should be performed only after all devices from the same
2722 		 * hive get initialized.
2723 		 *
2724 		 * However, it's unknown how many device in the hive in advance.
2725 		 * As this is counted one by one during devices initializations.
2726 		 *
2727 		 * So, we wait for all XGMI interlinked devices initialized.
2728 		 * This may bring some delays as those devices may come from
2729 		 * different hives. But that should be OK.
2730 		 */
2731 		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2732 			for (i = 0; i < mgpu_info.num_gpu; i++) {
2733 				gpu_instance = &(mgpu_info.gpu_ins[i]);
2734 				if (gpu_instance->adev->flags & AMD_IS_APU)
2735 					continue;
2736 
2737 				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2738 						AMDGPU_XGMI_PSTATE_MIN);
2739 				if (r) {
2740 					DRM_ERROR("pstate setting failed (%d).\n", r);
2741 					break;
2742 				}
2743 			}
2744 		}
2745 
2746 		mutex_unlock(&mgpu_info.mutex);
2747 	}
2748 
2749 	return 0;
2750 }
2751 
2752 /**
2753  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2754  *
2755  * @adev: amdgpu_device pointer
2756  *
2757  * For ASICs need to disable SMC first
2758  */
2759 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2760 {
2761 	int i, r;
2762 
2763 	if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2764 		return;
2765 
2766 	for (i = 0; i < adev->num_ip_blocks; i++) {
2767 		if (!adev->ip_blocks[i].status.hw)
2768 			continue;
2769 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2770 			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2771 			/* XXX handle errors */
2772 			if (r) {
2773 				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2774 					  adev->ip_blocks[i].version->funcs->name, r);
2775 			}
2776 			adev->ip_blocks[i].status.hw = false;
2777 			break;
2778 		}
2779 	}
2780 }
2781 
2782 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2783 {
2784 	int i, r;
2785 
2786 	for (i = 0; i < adev->num_ip_blocks; i++) {
2787 		if (!adev->ip_blocks[i].version->funcs->early_fini)
2788 			continue;
2789 
2790 		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2791 		if (r) {
2792 			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2793 				  adev->ip_blocks[i].version->funcs->name, r);
2794 		}
2795 	}
2796 
2797 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2798 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2799 
2800 	amdgpu_amdkfd_suspend(adev, false);
2801 
2802 	/* Workaroud for ASICs need to disable SMC first */
2803 	amdgpu_device_smu_fini_early(adev);
2804 
2805 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2806 		if (!adev->ip_blocks[i].status.hw)
2807 			continue;
2808 
2809 		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2810 		/* XXX handle errors */
2811 		if (r) {
2812 			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2813 				  adev->ip_blocks[i].version->funcs->name, r);
2814 		}
2815 
2816 		adev->ip_blocks[i].status.hw = false;
2817 	}
2818 
2819 	if (amdgpu_sriov_vf(adev)) {
2820 		if (amdgpu_virt_release_full_gpu(adev, false))
2821 			DRM_ERROR("failed to release exclusive mode on fini\n");
2822 	}
2823 
2824 	return 0;
2825 }
2826 
2827 /**
2828  * amdgpu_device_ip_fini - run fini for hardware IPs
2829  *
2830  * @adev: amdgpu_device pointer
2831  *
2832  * Main teardown pass for hardware IPs.  The list of all the hardware
2833  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2834  * are run.  hw_fini tears down the hardware associated with each IP
2835  * and sw_fini tears down any software state associated with each IP.
2836  * Returns 0 on success, negative error code on failure.
2837  */
2838 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2839 {
2840 	int i, r;
2841 
2842 	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2843 		amdgpu_virt_release_ras_err_handler_data(adev);
2844 
2845 	if (adev->gmc.xgmi.num_physical_nodes > 1)
2846 		amdgpu_xgmi_remove_device(adev);
2847 
2848 	amdgpu_amdkfd_device_fini_sw(adev);
2849 
2850 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2851 		if (!adev->ip_blocks[i].status.sw)
2852 			continue;
2853 
2854 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2855 			amdgpu_ucode_free_bo(adev);
2856 			amdgpu_free_static_csa(&adev->virt.csa_obj);
2857 			amdgpu_device_wb_fini(adev);
2858 			amdgpu_device_vram_scratch_fini(adev);
2859 			amdgpu_ib_pool_fini(adev);
2860 		}
2861 
2862 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2863 		/* XXX handle errors */
2864 		if (r) {
2865 			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2866 				  adev->ip_blocks[i].version->funcs->name, r);
2867 		}
2868 		adev->ip_blocks[i].status.sw = false;
2869 		adev->ip_blocks[i].status.valid = false;
2870 	}
2871 
2872 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2873 		if (!adev->ip_blocks[i].status.late_initialized)
2874 			continue;
2875 		if (adev->ip_blocks[i].version->funcs->late_fini)
2876 			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2877 		adev->ip_blocks[i].status.late_initialized = false;
2878 	}
2879 
2880 	amdgpu_ras_fini(adev);
2881 
2882 	return 0;
2883 }
2884 
2885 /**
2886  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2887  *
2888  * @work: work_struct.
2889  */
2890 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2891 {
2892 	struct amdgpu_device *adev =
2893 		container_of(work, struct amdgpu_device, delayed_init_work.work);
2894 	int r;
2895 
2896 	r = amdgpu_ib_ring_tests(adev);
2897 	if (r)
2898 		DRM_ERROR("ib ring test failed (%d).\n", r);
2899 }
2900 
2901 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2902 {
2903 	struct amdgpu_device *adev =
2904 		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2905 
2906 	WARN_ON_ONCE(adev->gfx.gfx_off_state);
2907 	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2908 
2909 	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2910 		adev->gfx.gfx_off_state = true;
2911 }
2912 
2913 /**
2914  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2915  *
2916  * @adev: amdgpu_device pointer
2917  *
2918  * Main suspend function for hardware IPs.  The list of all the hardware
2919  * IPs that make up the asic is walked, clockgating is disabled and the
2920  * suspend callbacks are run.  suspend puts the hardware and software state
2921  * in each IP into a state suitable for suspend.
2922  * Returns 0 on success, negative error code on failure.
2923  */
2924 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2925 {
2926 	int i, r;
2927 
2928 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2929 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2930 
2931 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2932 		if (!adev->ip_blocks[i].status.valid)
2933 			continue;
2934 
2935 		/* displays are handled separately */
2936 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2937 			continue;
2938 
2939 		/* XXX handle errors */
2940 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2941 		/* XXX handle errors */
2942 		if (r) {
2943 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2944 				  adev->ip_blocks[i].version->funcs->name, r);
2945 			return r;
2946 		}
2947 
2948 		adev->ip_blocks[i].status.hw = false;
2949 	}
2950 
2951 	return 0;
2952 }
2953 
2954 /**
2955  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2956  *
2957  * @adev: amdgpu_device pointer
2958  *
2959  * Main suspend function for hardware IPs.  The list of all the hardware
2960  * IPs that make up the asic is walked, clockgating is disabled and the
2961  * suspend callbacks are run.  suspend puts the hardware and software state
2962  * in each IP into a state suitable for suspend.
2963  * Returns 0 on success, negative error code on failure.
2964  */
2965 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2966 {
2967 	int i, r;
2968 
2969 	if (adev->in_s0ix)
2970 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2971 
2972 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2973 		if (!adev->ip_blocks[i].status.valid)
2974 			continue;
2975 		/* displays are handled in phase1 */
2976 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2977 			continue;
2978 		/* PSP lost connection when err_event_athub occurs */
2979 		if (amdgpu_ras_intr_triggered() &&
2980 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2981 			adev->ip_blocks[i].status.hw = false;
2982 			continue;
2983 		}
2984 
2985 		/* skip unnecessary suspend if we do not initialize them yet */
2986 		if (adev->gmc.xgmi.pending_reset &&
2987 		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2988 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2989 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2990 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2991 			adev->ip_blocks[i].status.hw = false;
2992 			continue;
2993 		}
2994 
2995 		/* skip suspend of gfx and psp for S0ix
2996 		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
2997 		 * like at runtime. PSP is also part of the always on hardware
2998 		 * so no need to suspend it.
2999 		 */
3000 		if (adev->in_s0ix &&
3001 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3002 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
3003 			continue;
3004 
3005 		/* XXX handle errors */
3006 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
3007 		/* XXX handle errors */
3008 		if (r) {
3009 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3010 				  adev->ip_blocks[i].version->funcs->name, r);
3011 		}
3012 		adev->ip_blocks[i].status.hw = false;
3013 		/* handle putting the SMC in the appropriate state */
3014 		if(!amdgpu_sriov_vf(adev)){
3015 			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3016 				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3017 				if (r) {
3018 					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3019 							adev->mp1_state, r);
3020 					return r;
3021 				}
3022 			}
3023 		}
3024 	}
3025 
3026 	return 0;
3027 }
3028 
3029 /**
3030  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3031  *
3032  * @adev: amdgpu_device pointer
3033  *
3034  * Main suspend function for hardware IPs.  The list of all the hardware
3035  * IPs that make up the asic is walked, clockgating is disabled and the
3036  * suspend callbacks are run.  suspend puts the hardware and software state
3037  * in each IP into a state suitable for suspend.
3038  * Returns 0 on success, negative error code on failure.
3039  */
3040 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3041 {
3042 	int r;
3043 
3044 	if (amdgpu_sriov_vf(adev)) {
3045 		amdgpu_virt_fini_data_exchange(adev);
3046 		amdgpu_virt_request_full_gpu(adev, false);
3047 	}
3048 
3049 	r = amdgpu_device_ip_suspend_phase1(adev);
3050 	if (r)
3051 		return r;
3052 	r = amdgpu_device_ip_suspend_phase2(adev);
3053 
3054 	if (amdgpu_sriov_vf(adev))
3055 		amdgpu_virt_release_full_gpu(adev, false);
3056 
3057 	return r;
3058 }
3059 
3060 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3061 {
3062 	int i, r;
3063 
3064 	static enum amd_ip_block_type ip_order[] = {
3065 		AMD_IP_BLOCK_TYPE_COMMON,
3066 		AMD_IP_BLOCK_TYPE_GMC,
3067 		AMD_IP_BLOCK_TYPE_PSP,
3068 		AMD_IP_BLOCK_TYPE_IH,
3069 	};
3070 
3071 	for (i = 0; i < adev->num_ip_blocks; i++) {
3072 		int j;
3073 		struct amdgpu_ip_block *block;
3074 
3075 		block = &adev->ip_blocks[i];
3076 		block->status.hw = false;
3077 
3078 		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3079 
3080 			if (block->version->type != ip_order[j] ||
3081 				!block->status.valid)
3082 				continue;
3083 
3084 			r = block->version->funcs->hw_init(adev);
3085 			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3086 			if (r)
3087 				return r;
3088 			block->status.hw = true;
3089 		}
3090 	}
3091 
3092 	return 0;
3093 }
3094 
3095 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3096 {
3097 	int i, r;
3098 
3099 	static enum amd_ip_block_type ip_order[] = {
3100 		AMD_IP_BLOCK_TYPE_SMC,
3101 		AMD_IP_BLOCK_TYPE_DCE,
3102 		AMD_IP_BLOCK_TYPE_GFX,
3103 		AMD_IP_BLOCK_TYPE_SDMA,
3104 		AMD_IP_BLOCK_TYPE_UVD,
3105 		AMD_IP_BLOCK_TYPE_VCE,
3106 		AMD_IP_BLOCK_TYPE_VCN
3107 	};
3108 
3109 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3110 		int j;
3111 		struct amdgpu_ip_block *block;
3112 
3113 		for (j = 0; j < adev->num_ip_blocks; j++) {
3114 			block = &adev->ip_blocks[j];
3115 
3116 			if (block->version->type != ip_order[i] ||
3117 				!block->status.valid ||
3118 				block->status.hw)
3119 				continue;
3120 
3121 			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3122 				r = block->version->funcs->resume(adev);
3123 			else
3124 				r = block->version->funcs->hw_init(adev);
3125 
3126 			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3127 			if (r)
3128 				return r;
3129 			block->status.hw = true;
3130 		}
3131 	}
3132 
3133 	return 0;
3134 }
3135 
3136 /**
3137  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3138  *
3139  * @adev: amdgpu_device pointer
3140  *
3141  * First resume function for hardware IPs.  The list of all the hardware
3142  * IPs that make up the asic is walked and the resume callbacks are run for
3143  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3144  * after a suspend and updates the software state as necessary.  This
3145  * function is also used for restoring the GPU after a GPU reset.
3146  * Returns 0 on success, negative error code on failure.
3147  */
3148 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3149 {
3150 	int i, r;
3151 
3152 	for (i = 0; i < adev->num_ip_blocks; i++) {
3153 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3154 			continue;
3155 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3156 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3157 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3158 
3159 			r = adev->ip_blocks[i].version->funcs->resume(adev);
3160 			if (r) {
3161 				DRM_ERROR("resume of IP block <%s> failed %d\n",
3162 					  adev->ip_blocks[i].version->funcs->name, r);
3163 				return r;
3164 			}
3165 			adev->ip_blocks[i].status.hw = true;
3166 		}
3167 	}
3168 
3169 	return 0;
3170 }
3171 
3172 /**
3173  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3174  *
3175  * @adev: amdgpu_device pointer
3176  *
3177  * First resume function for hardware IPs.  The list of all the hardware
3178  * IPs that make up the asic is walked and the resume callbacks are run for
3179  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3180  * functional state after a suspend and updates the software state as
3181  * necessary.  This function is also used for restoring the GPU after a GPU
3182  * reset.
3183  * Returns 0 on success, negative error code on failure.
3184  */
3185 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3186 {
3187 	int i, r;
3188 
3189 	for (i = 0; i < adev->num_ip_blocks; i++) {
3190 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3191 			continue;
3192 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3193 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3194 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3195 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3196 			continue;
3197 		r = adev->ip_blocks[i].version->funcs->resume(adev);
3198 		if (r) {
3199 			DRM_ERROR("resume of IP block <%s> failed %d\n",
3200 				  adev->ip_blocks[i].version->funcs->name, r);
3201 			return r;
3202 		}
3203 		adev->ip_blocks[i].status.hw = true;
3204 	}
3205 
3206 	return 0;
3207 }
3208 
3209 /**
3210  * amdgpu_device_ip_resume - run resume for hardware IPs
3211  *
3212  * @adev: amdgpu_device pointer
3213  *
3214  * Main resume function for hardware IPs.  The hardware IPs
3215  * are split into two resume functions because they are
3216  * are also used in in recovering from a GPU reset and some additional
3217  * steps need to be take between them.  In this case (S3/S4) they are
3218  * run sequentially.
3219  * Returns 0 on success, negative error code on failure.
3220  */
3221 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3222 {
3223 	int r;
3224 
3225 	r = amdgpu_amdkfd_resume_iommu(adev);
3226 	if (r)
3227 		return r;
3228 
3229 	r = amdgpu_device_ip_resume_phase1(adev);
3230 	if (r)
3231 		return r;
3232 
3233 	r = amdgpu_device_fw_loading(adev);
3234 	if (r)
3235 		return r;
3236 
3237 	r = amdgpu_device_ip_resume_phase2(adev);
3238 
3239 	return r;
3240 }
3241 
3242 /**
3243  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3244  *
3245  * @adev: amdgpu_device pointer
3246  *
3247  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3248  */
3249 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3250 {
3251 	if (amdgpu_sriov_vf(adev)) {
3252 		if (adev->is_atom_fw) {
3253 			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3254 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3255 		} else {
3256 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3257 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3258 		}
3259 
3260 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3261 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3262 	}
3263 }
3264 
3265 /**
3266  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3267  *
3268  * @asic_type: AMD asic type
3269  *
3270  * Check if there is DC (new modesetting infrastructre) support for an asic.
3271  * returns true if DC has support, false if not.
3272  */
3273 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3274 {
3275 	switch (asic_type) {
3276 #ifdef CONFIG_DRM_AMDGPU_SI
3277 	case CHIP_HAINAN:
3278 #endif
3279 	case CHIP_TOPAZ:
3280 		/* chips with no display hardware */
3281 		return false;
3282 #if defined(CONFIG_DRM_AMD_DC)
3283 	case CHIP_TAHITI:
3284 	case CHIP_PITCAIRN:
3285 	case CHIP_VERDE:
3286 	case CHIP_OLAND:
3287 		/*
3288 		 * We have systems in the wild with these ASICs that require
3289 		 * LVDS and VGA support which is not supported with DC.
3290 		 *
3291 		 * Fallback to the non-DC driver here by default so as not to
3292 		 * cause regressions.
3293 		 */
3294 #if defined(CONFIG_DRM_AMD_DC_SI)
3295 		return amdgpu_dc > 0;
3296 #else
3297 		return false;
3298 #endif
3299 	case CHIP_BONAIRE:
3300 	case CHIP_KAVERI:
3301 	case CHIP_KABINI:
3302 	case CHIP_MULLINS:
3303 		/*
3304 		 * We have systems in the wild with these ASICs that require
3305 		 * VGA support which is not supported with DC.
3306 		 *
3307 		 * Fallback to the non-DC driver here by default so as not to
3308 		 * cause regressions.
3309 		 */
3310 		return amdgpu_dc > 0;
3311 	default:
3312 		return amdgpu_dc != 0;
3313 #else
3314 	default:
3315 		if (amdgpu_dc > 0)
3316 			DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3317 					 "but isn't supported by ASIC, ignoring\n");
3318 		return false;
3319 #endif
3320 	}
3321 }
3322 
3323 /**
3324  * amdgpu_device_has_dc_support - check if dc is supported
3325  *
3326  * @adev: amdgpu_device pointer
3327  *
3328  * Returns true for supported, false for not supported
3329  */
3330 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3331 {
3332 	if (amdgpu_sriov_vf(adev) ||
3333 	    adev->enable_virtual_display ||
3334 	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3335 		return false;
3336 
3337 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3338 }
3339 
3340 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3341 {
3342 	struct amdgpu_device *adev =
3343 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3344 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3345 
3346 	/* It's a bug to not have a hive within this function */
3347 	if (WARN_ON(!hive))
3348 		return;
3349 
3350 	/*
3351 	 * Use task barrier to synchronize all xgmi reset works across the
3352 	 * hive. task_barrier_enter and task_barrier_exit will block
3353 	 * until all the threads running the xgmi reset works reach
3354 	 * those points. task_barrier_full will do both blocks.
3355 	 */
3356 	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3357 
3358 		task_barrier_enter(&hive->tb);
3359 		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3360 
3361 		if (adev->asic_reset_res)
3362 			goto fail;
3363 
3364 		task_barrier_exit(&hive->tb);
3365 		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3366 
3367 		if (adev->asic_reset_res)
3368 			goto fail;
3369 
3370 		if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3371 		    adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3372 			adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3373 	} else {
3374 
3375 		task_barrier_full(&hive->tb);
3376 		adev->asic_reset_res =  amdgpu_asic_reset(adev);
3377 	}
3378 
3379 fail:
3380 	if (adev->asic_reset_res)
3381 		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3382 			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3383 	amdgpu_put_xgmi_hive(hive);
3384 }
3385 
3386 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3387 {
3388 	char *input = amdgpu_lockup_timeout;
3389 	char *timeout_setting = NULL;
3390 	int index = 0;
3391 	long timeout;
3392 	int ret = 0;
3393 
3394 	/*
3395 	 * By default timeout for non compute jobs is 10000
3396 	 * and 60000 for compute jobs.
3397 	 * In SR-IOV or passthrough mode, timeout for compute
3398 	 * jobs are 60000 by default.
3399 	 */
3400 	adev->gfx_timeout = msecs_to_jiffies(10000);
3401 	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3402 	if (amdgpu_sriov_vf(adev))
3403 		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3404 					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3405 	else
3406 		adev->compute_timeout =  msecs_to_jiffies(60000);
3407 
3408 	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3409 		while ((timeout_setting = strsep(&input, ",")) &&
3410 				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3411 			ret = kstrtol(timeout_setting, 0, &timeout);
3412 			if (ret)
3413 				return ret;
3414 
3415 			if (timeout == 0) {
3416 				index++;
3417 				continue;
3418 			} else if (timeout < 0) {
3419 				timeout = MAX_SCHEDULE_TIMEOUT;
3420 				dev_warn(adev->dev, "lockup timeout disabled");
3421 				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3422 			} else {
3423 				timeout = msecs_to_jiffies(timeout);
3424 			}
3425 
3426 			switch (index++) {
3427 			case 0:
3428 				adev->gfx_timeout = timeout;
3429 				break;
3430 			case 1:
3431 				adev->compute_timeout = timeout;
3432 				break;
3433 			case 2:
3434 				adev->sdma_timeout = timeout;
3435 				break;
3436 			case 3:
3437 				adev->video_timeout = timeout;
3438 				break;
3439 			default:
3440 				break;
3441 			}
3442 		}
3443 		/*
3444 		 * There is only one value specified and
3445 		 * it should apply to all non-compute jobs.
3446 		 */
3447 		if (index == 1) {
3448 			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3449 			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3450 				adev->compute_timeout = adev->gfx_timeout;
3451 		}
3452 	}
3453 
3454 	return ret;
3455 }
3456 
3457 /**
3458  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3459  *
3460  * @adev: amdgpu_device pointer
3461  *
3462  * RAM direct mapped to GPU if IOMMU is not enabled or is pass through mode
3463  */
3464 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3465 {
3466 	struct iommu_domain *domain;
3467 
3468 	domain = iommu_get_domain_for_dev(adev->dev);
3469 	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3470 		adev->ram_is_direct_mapped = true;
3471 }
3472 
3473 static const struct attribute *amdgpu_dev_attributes[] = {
3474 	&dev_attr_product_name.attr,
3475 	&dev_attr_product_number.attr,
3476 	&dev_attr_serial_number.attr,
3477 	&dev_attr_pcie_replay_count.attr,
3478 	NULL
3479 };
3480 
3481 /**
3482  * amdgpu_device_init - initialize the driver
3483  *
3484  * @adev: amdgpu_device pointer
3485  * @flags: driver flags
3486  *
3487  * Initializes the driver info and hw (all asics).
3488  * Returns 0 for success or an error on failure.
3489  * Called at driver startup.
3490  */
3491 int amdgpu_device_init(struct amdgpu_device *adev,
3492 		       uint32_t flags)
3493 {
3494 	struct drm_device *ddev = adev_to_drm(adev);
3495 	struct pci_dev *pdev = adev->pdev;
3496 	int r, i;
3497 	bool px = false;
3498 	u32 max_MBps;
3499 
3500 	adev->shutdown = false;
3501 	adev->flags = flags;
3502 
3503 	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3504 		adev->asic_type = amdgpu_force_asic_type;
3505 	else
3506 		adev->asic_type = flags & AMD_ASIC_MASK;
3507 
3508 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3509 	if (amdgpu_emu_mode == 1)
3510 		adev->usec_timeout *= 10;
3511 	adev->gmc.gart_size = 512 * 1024 * 1024;
3512 	adev->accel_working = false;
3513 	adev->num_rings = 0;
3514 	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3515 	adev->mman.buffer_funcs = NULL;
3516 	adev->mman.buffer_funcs_ring = NULL;
3517 	adev->vm_manager.vm_pte_funcs = NULL;
3518 	adev->vm_manager.vm_pte_num_scheds = 0;
3519 	adev->gmc.gmc_funcs = NULL;
3520 	adev->harvest_ip_mask = 0x0;
3521 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3522 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3523 
3524 	adev->smc_rreg = &amdgpu_invalid_rreg;
3525 	adev->smc_wreg = &amdgpu_invalid_wreg;
3526 	adev->pcie_rreg = &amdgpu_invalid_rreg;
3527 	adev->pcie_wreg = &amdgpu_invalid_wreg;
3528 	adev->pciep_rreg = &amdgpu_invalid_rreg;
3529 	adev->pciep_wreg = &amdgpu_invalid_wreg;
3530 	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3531 	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3532 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3533 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3534 	adev->didt_rreg = &amdgpu_invalid_rreg;
3535 	adev->didt_wreg = &amdgpu_invalid_wreg;
3536 	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3537 	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3538 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3539 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3540 
3541 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3542 		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3543 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3544 
3545 	/* mutex initialization are all done here so we
3546 	 * can recall function without having locking issues */
3547 	mutex_init(&adev->firmware.mutex);
3548 	mutex_init(&adev->pm.mutex);
3549 	mutex_init(&adev->gfx.gpu_clock_mutex);
3550 	mutex_init(&adev->srbm_mutex);
3551 	mutex_init(&adev->gfx.pipe_reserve_mutex);
3552 	mutex_init(&adev->gfx.gfx_off_mutex);
3553 	mutex_init(&adev->grbm_idx_mutex);
3554 	mutex_init(&adev->mn_lock);
3555 	mutex_init(&adev->virt.vf_errors.lock);
3556 	hash_init(adev->mn_hash);
3557 	mutex_init(&adev->psp.mutex);
3558 	mutex_init(&adev->notifier_lock);
3559 	mutex_init(&adev->pm.stable_pstate_ctx_lock);
3560 	mutex_init(&adev->benchmark_mutex);
3561 
3562 	amdgpu_device_init_apu_flags(adev);
3563 
3564 	r = amdgpu_device_check_arguments(adev);
3565 	if (r)
3566 		return r;
3567 
3568 	spin_lock_init(&adev->mmio_idx_lock);
3569 	spin_lock_init(&adev->smc_idx_lock);
3570 	spin_lock_init(&adev->pcie_idx_lock);
3571 	spin_lock_init(&adev->uvd_ctx_idx_lock);
3572 	spin_lock_init(&adev->didt_idx_lock);
3573 	spin_lock_init(&adev->gc_cac_idx_lock);
3574 	spin_lock_init(&adev->se_cac_idx_lock);
3575 	spin_lock_init(&adev->audio_endpt_idx_lock);
3576 	spin_lock_init(&adev->mm_stats.lock);
3577 
3578 	INIT_LIST_HEAD(&adev->shadow_list);
3579 	mutex_init(&adev->shadow_list_lock);
3580 
3581 	INIT_LIST_HEAD(&adev->reset_list);
3582 
3583 	INIT_LIST_HEAD(&adev->ras_list);
3584 
3585 	INIT_DELAYED_WORK(&adev->delayed_init_work,
3586 			  amdgpu_device_delayed_init_work_handler);
3587 	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3588 			  amdgpu_device_delay_enable_gfx_off);
3589 
3590 	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3591 
3592 	adev->gfx.gfx_off_req_count = 1;
3593 	adev->gfx.gfx_off_residency = 0;
3594 	adev->gfx.gfx_off_entrycount = 0;
3595 	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3596 
3597 	atomic_set(&adev->throttling_logging_enabled, 1);
3598 	/*
3599 	 * If throttling continues, logging will be performed every minute
3600 	 * to avoid log flooding. "-1" is subtracted since the thermal
3601 	 * throttling interrupt comes every second. Thus, the total logging
3602 	 * interval is 59 seconds(retelimited printk interval) + 1(waiting
3603 	 * for throttling interrupt) = 60 seconds.
3604 	 */
3605 	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3606 	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3607 
3608 	/* Registers mapping */
3609 	/* TODO: block userspace mapping of io register */
3610 	if (adev->asic_type >= CHIP_BONAIRE) {
3611 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3612 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3613 	} else {
3614 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3615 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3616 	}
3617 
3618 	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3619 		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3620 
3621 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3622 	if (adev->rmmio == NULL) {
3623 		return -ENOMEM;
3624 	}
3625 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3626 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3627 
3628 	amdgpu_device_get_pcie_info(adev);
3629 
3630 	if (amdgpu_mcbp)
3631 		DRM_INFO("MCBP is enabled\n");
3632 
3633 	/*
3634 	 * Reset domain needs to be present early, before XGMI hive discovered
3635 	 * (if any) and intitialized to use reset sem and in_gpu reset flag
3636 	 * early on during init and before calling to RREG32.
3637 	 */
3638 	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3639 	if (!adev->reset_domain)
3640 		return -ENOMEM;
3641 
3642 	/* detect hw virtualization here */
3643 	amdgpu_detect_virtualization(adev);
3644 
3645 	r = amdgpu_device_get_job_timeout_settings(adev);
3646 	if (r) {
3647 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3648 		return r;
3649 	}
3650 
3651 	/* early init functions */
3652 	r = amdgpu_device_ip_early_init(adev);
3653 	if (r)
3654 		return r;
3655 
3656 	/* Enable TMZ based on IP_VERSION */
3657 	amdgpu_gmc_tmz_set(adev);
3658 
3659 	amdgpu_gmc_noretry_set(adev);
3660 	/* Need to get xgmi info early to decide the reset behavior*/
3661 	if (adev->gmc.xgmi.supported) {
3662 		r = adev->gfxhub.funcs->get_xgmi_info(adev);
3663 		if (r)
3664 			return r;
3665 	}
3666 
3667 	/* enable PCIE atomic ops */
3668 	if (amdgpu_sriov_vf(adev))
3669 		adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3670 			adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3671 			(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3672 	else
3673 		adev->have_atomics_support =
3674 			!pci_enable_atomic_ops_to_root(adev->pdev,
3675 					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3676 					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3677 	if (!adev->have_atomics_support)
3678 		dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3679 
3680 	/* doorbell bar mapping and doorbell index init*/
3681 	amdgpu_device_doorbell_init(adev);
3682 
3683 	if (amdgpu_emu_mode == 1) {
3684 		/* post the asic on emulation mode */
3685 		emu_soc_asic_init(adev);
3686 		goto fence_driver_init;
3687 	}
3688 
3689 	amdgpu_reset_init(adev);
3690 
3691 	/* detect if we are with an SRIOV vbios */
3692 	amdgpu_device_detect_sriov_bios(adev);
3693 
3694 	/* check if we need to reset the asic
3695 	 *  E.g., driver was not cleanly unloaded previously, etc.
3696 	 */
3697 	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3698 		if (adev->gmc.xgmi.num_physical_nodes) {
3699 			dev_info(adev->dev, "Pending hive reset.\n");
3700 			adev->gmc.xgmi.pending_reset = true;
3701 			/* Only need to init necessary block for SMU to handle the reset */
3702 			for (i = 0; i < adev->num_ip_blocks; i++) {
3703 				if (!adev->ip_blocks[i].status.valid)
3704 					continue;
3705 				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3706 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3707 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3708 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3709 					DRM_DEBUG("IP %s disabled for hw_init.\n",
3710 						adev->ip_blocks[i].version->funcs->name);
3711 					adev->ip_blocks[i].status.hw = true;
3712 				}
3713 			}
3714 		} else {
3715 			r = amdgpu_asic_reset(adev);
3716 			if (r) {
3717 				dev_err(adev->dev, "asic reset on init failed\n");
3718 				goto failed;
3719 			}
3720 		}
3721 	}
3722 
3723 	pci_enable_pcie_error_reporting(adev->pdev);
3724 
3725 	/* Post card if necessary */
3726 	if (amdgpu_device_need_post(adev)) {
3727 		if (!adev->bios) {
3728 			dev_err(adev->dev, "no vBIOS found\n");
3729 			r = -EINVAL;
3730 			goto failed;
3731 		}
3732 		DRM_INFO("GPU posting now...\n");
3733 		r = amdgpu_device_asic_init(adev);
3734 		if (r) {
3735 			dev_err(adev->dev, "gpu post error!\n");
3736 			goto failed;
3737 		}
3738 	}
3739 
3740 	if (adev->is_atom_fw) {
3741 		/* Initialize clocks */
3742 		r = amdgpu_atomfirmware_get_clock_info(adev);
3743 		if (r) {
3744 			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3745 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3746 			goto failed;
3747 		}
3748 	} else {
3749 		/* Initialize clocks */
3750 		r = amdgpu_atombios_get_clock_info(adev);
3751 		if (r) {
3752 			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3753 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3754 			goto failed;
3755 		}
3756 		/* init i2c buses */
3757 		if (!amdgpu_device_has_dc_support(adev))
3758 			amdgpu_atombios_i2c_init(adev);
3759 	}
3760 
3761 fence_driver_init:
3762 	/* Fence driver */
3763 	r = amdgpu_fence_driver_sw_init(adev);
3764 	if (r) {
3765 		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3766 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3767 		goto failed;
3768 	}
3769 
3770 	/* init the mode config */
3771 	drm_mode_config_init(adev_to_drm(adev));
3772 
3773 	r = amdgpu_device_ip_init(adev);
3774 	if (r) {
3775 		/* failed in exclusive mode due to timeout */
3776 		if (amdgpu_sriov_vf(adev) &&
3777 		    !amdgpu_sriov_runtime(adev) &&
3778 		    amdgpu_virt_mmio_blocked(adev) &&
3779 		    !amdgpu_virt_wait_reset(adev)) {
3780 			dev_err(adev->dev, "VF exclusive mode timeout\n");
3781 			/* Don't send request since VF is inactive. */
3782 			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3783 			adev->virt.ops = NULL;
3784 			r = -EAGAIN;
3785 			goto release_ras_con;
3786 		}
3787 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3788 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3789 		goto release_ras_con;
3790 	}
3791 
3792 	amdgpu_fence_driver_hw_init(adev);
3793 
3794 	dev_info(adev->dev,
3795 		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3796 			adev->gfx.config.max_shader_engines,
3797 			adev->gfx.config.max_sh_per_se,
3798 			adev->gfx.config.max_cu_per_sh,
3799 			adev->gfx.cu_info.number);
3800 
3801 	adev->accel_working = true;
3802 
3803 	amdgpu_vm_check_compute_bug(adev);
3804 
3805 	/* Initialize the buffer migration limit. */
3806 	if (amdgpu_moverate >= 0)
3807 		max_MBps = amdgpu_moverate;
3808 	else
3809 		max_MBps = 8; /* Allow 8 MB/s. */
3810 	/* Get a log2 for easy divisions. */
3811 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3812 
3813 	r = amdgpu_pm_sysfs_init(adev);
3814 	if (r) {
3815 		adev->pm_sysfs_en = false;
3816 		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3817 	} else
3818 		adev->pm_sysfs_en = true;
3819 
3820 	r = amdgpu_ucode_sysfs_init(adev);
3821 	if (r) {
3822 		adev->ucode_sysfs_en = false;
3823 		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3824 	} else
3825 		adev->ucode_sysfs_en = true;
3826 
3827 	r = amdgpu_psp_sysfs_init(adev);
3828 	if (r) {
3829 		adev->psp_sysfs_en = false;
3830 		if (!amdgpu_sriov_vf(adev))
3831 			DRM_ERROR("Creating psp sysfs failed\n");
3832 	} else
3833 		adev->psp_sysfs_en = true;
3834 
3835 	/*
3836 	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3837 	 * Otherwise the mgpu fan boost feature will be skipped due to the
3838 	 * gpu instance is counted less.
3839 	 */
3840 	amdgpu_register_gpu_instance(adev);
3841 
3842 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
3843 	 * explicit gating rather than handling it automatically.
3844 	 */
3845 	if (!adev->gmc.xgmi.pending_reset) {
3846 		r = amdgpu_device_ip_late_init(adev);
3847 		if (r) {
3848 			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3849 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3850 			goto release_ras_con;
3851 		}
3852 		/* must succeed. */
3853 		amdgpu_ras_resume(adev);
3854 		queue_delayed_work(system_wq, &adev->delayed_init_work,
3855 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3856 	}
3857 
3858 	if (amdgpu_sriov_vf(adev))
3859 		flush_delayed_work(&adev->delayed_init_work);
3860 
3861 	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3862 	if (r)
3863 		dev_err(adev->dev, "Could not create amdgpu device attr\n");
3864 
3865 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
3866 		r = amdgpu_pmu_init(adev);
3867 	if (r)
3868 		dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3869 
3870 	/* Have stored pci confspace at hand for restore in sudden PCI error */
3871 	if (amdgpu_device_cache_pci_state(adev->pdev))
3872 		pci_restore_state(pdev);
3873 
3874 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3875 	/* this will fail for cards that aren't VGA class devices, just
3876 	 * ignore it */
3877 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3878 		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3879 
3880 	if (amdgpu_device_supports_px(ddev)) {
3881 		px = true;
3882 		vga_switcheroo_register_client(adev->pdev,
3883 					       &amdgpu_switcheroo_ops, px);
3884 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3885 	}
3886 
3887 	if (adev->gmc.xgmi.pending_reset)
3888 		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3889 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3890 
3891 	amdgpu_device_check_iommu_direct_map(adev);
3892 
3893 	return 0;
3894 
3895 release_ras_con:
3896 	amdgpu_release_ras_context(adev);
3897 
3898 failed:
3899 	amdgpu_vf_error_trans_all(adev);
3900 
3901 	return r;
3902 }
3903 
3904 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3905 {
3906 
3907 	/* Clear all CPU mappings pointing to this device */
3908 	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3909 
3910 	/* Unmap all mapped bars - Doorbell, registers and VRAM */
3911 	amdgpu_device_doorbell_fini(adev);
3912 
3913 	iounmap(adev->rmmio);
3914 	adev->rmmio = NULL;
3915 	if (adev->mman.aper_base_kaddr)
3916 		iounmap(adev->mman.aper_base_kaddr);
3917 	adev->mman.aper_base_kaddr = NULL;
3918 
3919 	/* Memory manager related */
3920 	if (!adev->gmc.xgmi.connected_to_cpu) {
3921 		arch_phys_wc_del(adev->gmc.vram_mtrr);
3922 		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3923 	}
3924 }
3925 
3926 /**
3927  * amdgpu_device_fini_hw - tear down the driver
3928  *
3929  * @adev: amdgpu_device pointer
3930  *
3931  * Tear down the driver info (all asics).
3932  * Called at driver shutdown.
3933  */
3934 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3935 {
3936 	dev_info(adev->dev, "amdgpu: finishing device.\n");
3937 	flush_delayed_work(&adev->delayed_init_work);
3938 	adev->shutdown = true;
3939 
3940 	/* make sure IB test finished before entering exclusive mode
3941 	 * to avoid preemption on IB test
3942 	 * */
3943 	if (amdgpu_sriov_vf(adev)) {
3944 		amdgpu_virt_request_full_gpu(adev, false);
3945 		amdgpu_virt_fini_data_exchange(adev);
3946 	}
3947 
3948 	/* disable all interrupts */
3949 	amdgpu_irq_disable_all(adev);
3950 	if (adev->mode_info.mode_config_initialized){
3951 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3952 			drm_helper_force_disable_all(adev_to_drm(adev));
3953 		else
3954 			drm_atomic_helper_shutdown(adev_to_drm(adev));
3955 	}
3956 	amdgpu_fence_driver_hw_fini(adev);
3957 
3958 	if (adev->mman.initialized) {
3959 		flush_delayed_work(&adev->mman.bdev.wq);
3960 		ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3961 	}
3962 
3963 	if (adev->pm_sysfs_en)
3964 		amdgpu_pm_sysfs_fini(adev);
3965 	if (adev->ucode_sysfs_en)
3966 		amdgpu_ucode_sysfs_fini(adev);
3967 	if (adev->psp_sysfs_en)
3968 		amdgpu_psp_sysfs_fini(adev);
3969 	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3970 
3971 	/* disable ras feature must before hw fini */
3972 	amdgpu_ras_pre_fini(adev);
3973 
3974 	amdgpu_device_ip_fini_early(adev);
3975 
3976 	amdgpu_irq_fini_hw(adev);
3977 
3978 	if (adev->mman.initialized)
3979 		ttm_device_clear_dma_mappings(&adev->mman.bdev);
3980 
3981 	amdgpu_gart_dummy_page_fini(adev);
3982 
3983 	amdgpu_device_unmap_mmio(adev);
3984 
3985 }
3986 
3987 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3988 {
3989 	int idx;
3990 
3991 	amdgpu_fence_driver_sw_fini(adev);
3992 	amdgpu_device_ip_fini(adev);
3993 	release_firmware(adev->firmware.gpu_info_fw);
3994 	adev->firmware.gpu_info_fw = NULL;
3995 	adev->accel_working = false;
3996 	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
3997 
3998 	amdgpu_reset_fini(adev);
3999 
4000 	/* free i2c buses */
4001 	if (!amdgpu_device_has_dc_support(adev))
4002 		amdgpu_i2c_fini(adev);
4003 
4004 	if (amdgpu_emu_mode != 1)
4005 		amdgpu_atombios_fini(adev);
4006 
4007 	kfree(adev->bios);
4008 	adev->bios = NULL;
4009 	if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4010 		vga_switcheroo_unregister_client(adev->pdev);
4011 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
4012 	}
4013 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4014 		vga_client_unregister(adev->pdev);
4015 
4016 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4017 
4018 		iounmap(adev->rmmio);
4019 		adev->rmmio = NULL;
4020 		amdgpu_device_doorbell_fini(adev);
4021 		drm_dev_exit(idx);
4022 	}
4023 
4024 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
4025 		amdgpu_pmu_fini(adev);
4026 	if (adev->mman.discovery_bin)
4027 		amdgpu_discovery_fini(adev);
4028 
4029 	amdgpu_reset_put_reset_domain(adev->reset_domain);
4030 	adev->reset_domain = NULL;
4031 
4032 	kfree(adev->pci_state);
4033 
4034 }
4035 
4036 /**
4037  * amdgpu_device_evict_resources - evict device resources
4038  * @adev: amdgpu device object
4039  *
4040  * Evicts all ttm device resources(vram BOs, gart table) from the lru list
4041  * of the vram memory type. Mainly used for evicting device resources
4042  * at suspend time.
4043  *
4044  */
4045 static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
4046 {
4047 	/* No need to evict vram on APUs for suspend to ram or s2idle */
4048 	if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4049 		return;
4050 
4051 	if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
4052 		DRM_WARN("evicting device resources failed\n");
4053 
4054 }
4055 
4056 /*
4057  * Suspend & resume.
4058  */
4059 /**
4060  * amdgpu_device_suspend - initiate device suspend
4061  *
4062  * @dev: drm dev pointer
4063  * @fbcon : notify the fbdev of suspend
4064  *
4065  * Puts the hw in the suspend state (all asics).
4066  * Returns 0 for success or an error on failure.
4067  * Called at driver suspend.
4068  */
4069 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4070 {
4071 	struct amdgpu_device *adev = drm_to_adev(dev);
4072 
4073 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4074 		return 0;
4075 
4076 	adev->in_suspend = true;
4077 
4078 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4079 		DRM_WARN("smart shift update failed\n");
4080 
4081 	drm_kms_helper_poll_disable(dev);
4082 
4083 	if (fbcon)
4084 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4085 
4086 	cancel_delayed_work_sync(&adev->delayed_init_work);
4087 
4088 	amdgpu_ras_suspend(adev);
4089 
4090 	amdgpu_device_ip_suspend_phase1(adev);
4091 
4092 	if (!adev->in_s0ix)
4093 		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4094 
4095 	amdgpu_device_evict_resources(adev);
4096 
4097 	amdgpu_fence_driver_hw_fini(adev);
4098 
4099 	amdgpu_device_ip_suspend_phase2(adev);
4100 
4101 	return 0;
4102 }
4103 
4104 /**
4105  * amdgpu_device_resume - initiate device resume
4106  *
4107  * @dev: drm dev pointer
4108  * @fbcon : notify the fbdev of resume
4109  *
4110  * Bring the hw back to operating state (all asics).
4111  * Returns 0 for success or an error on failure.
4112  * Called at driver resume.
4113  */
4114 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4115 {
4116 	struct amdgpu_device *adev = drm_to_adev(dev);
4117 	int r = 0;
4118 
4119 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4120 		return 0;
4121 
4122 	if (adev->in_s0ix)
4123 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4124 
4125 	/* post card */
4126 	if (amdgpu_device_need_post(adev)) {
4127 		r = amdgpu_device_asic_init(adev);
4128 		if (r)
4129 			dev_err(adev->dev, "amdgpu asic init failed\n");
4130 	}
4131 
4132 	r = amdgpu_device_ip_resume(adev);
4133 	if (r) {
4134 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4135 		return r;
4136 	}
4137 	amdgpu_fence_driver_hw_init(adev);
4138 
4139 	r = amdgpu_device_ip_late_init(adev);
4140 	if (r)
4141 		return r;
4142 
4143 	queue_delayed_work(system_wq, &adev->delayed_init_work,
4144 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
4145 
4146 	if (!adev->in_s0ix) {
4147 		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4148 		if (r)
4149 			return r;
4150 	}
4151 
4152 	/* Make sure IB tests flushed */
4153 	flush_delayed_work(&adev->delayed_init_work);
4154 
4155 	if (fbcon)
4156 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4157 
4158 	drm_kms_helper_poll_enable(dev);
4159 
4160 	amdgpu_ras_resume(adev);
4161 
4162 	/*
4163 	 * Most of the connector probing functions try to acquire runtime pm
4164 	 * refs to ensure that the GPU is powered on when connector polling is
4165 	 * performed. Since we're calling this from a runtime PM callback,
4166 	 * trying to acquire rpm refs will cause us to deadlock.
4167 	 *
4168 	 * Since we're guaranteed to be holding the rpm lock, it's safe to
4169 	 * temporarily disable the rpm helpers so this doesn't deadlock us.
4170 	 */
4171 #ifdef CONFIG_PM
4172 	dev->dev->power.disable_depth++;
4173 #endif
4174 	if (!amdgpu_device_has_dc_support(adev))
4175 		drm_helper_hpd_irq_event(dev);
4176 	else
4177 		drm_kms_helper_hotplug_event(dev);
4178 #ifdef CONFIG_PM
4179 	dev->dev->power.disable_depth--;
4180 #endif
4181 	adev->in_suspend = false;
4182 
4183 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4184 		DRM_WARN("smart shift update failed\n");
4185 
4186 	return 0;
4187 }
4188 
4189 /**
4190  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4191  *
4192  * @adev: amdgpu_device pointer
4193  *
4194  * The list of all the hardware IPs that make up the asic is walked and
4195  * the check_soft_reset callbacks are run.  check_soft_reset determines
4196  * if the asic is still hung or not.
4197  * Returns true if any of the IPs are still in a hung state, false if not.
4198  */
4199 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4200 {
4201 	int i;
4202 	bool asic_hang = false;
4203 
4204 	if (amdgpu_sriov_vf(adev))
4205 		return true;
4206 
4207 	if (amdgpu_asic_need_full_reset(adev))
4208 		return true;
4209 
4210 	for (i = 0; i < adev->num_ip_blocks; i++) {
4211 		if (!adev->ip_blocks[i].status.valid)
4212 			continue;
4213 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4214 			adev->ip_blocks[i].status.hang =
4215 				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4216 		if (adev->ip_blocks[i].status.hang) {
4217 			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4218 			asic_hang = true;
4219 		}
4220 	}
4221 	return asic_hang;
4222 }
4223 
4224 /**
4225  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4226  *
4227  * @adev: amdgpu_device pointer
4228  *
4229  * The list of all the hardware IPs that make up the asic is walked and the
4230  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4231  * handles any IP specific hardware or software state changes that are
4232  * necessary for a soft reset to succeed.
4233  * Returns 0 on success, negative error code on failure.
4234  */
4235 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4236 {
4237 	int i, r = 0;
4238 
4239 	for (i = 0; i < adev->num_ip_blocks; i++) {
4240 		if (!adev->ip_blocks[i].status.valid)
4241 			continue;
4242 		if (adev->ip_blocks[i].status.hang &&
4243 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4244 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4245 			if (r)
4246 				return r;
4247 		}
4248 	}
4249 
4250 	return 0;
4251 }
4252 
4253 /**
4254  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4255  *
4256  * @adev: amdgpu_device pointer
4257  *
4258  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4259  * reset is necessary to recover.
4260  * Returns true if a full asic reset is required, false if not.
4261  */
4262 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4263 {
4264 	int i;
4265 
4266 	if (amdgpu_asic_need_full_reset(adev))
4267 		return true;
4268 
4269 	for (i = 0; i < adev->num_ip_blocks; i++) {
4270 		if (!adev->ip_blocks[i].status.valid)
4271 			continue;
4272 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4273 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4274 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4275 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4276 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4277 			if (adev->ip_blocks[i].status.hang) {
4278 				dev_info(adev->dev, "Some block need full reset!\n");
4279 				return true;
4280 			}
4281 		}
4282 	}
4283 	return false;
4284 }
4285 
4286 /**
4287  * amdgpu_device_ip_soft_reset - do a soft reset
4288  *
4289  * @adev: amdgpu_device pointer
4290  *
4291  * The list of all the hardware IPs that make up the asic is walked and the
4292  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4293  * IP specific hardware or software state changes that are necessary to soft
4294  * reset the IP.
4295  * Returns 0 on success, negative error code on failure.
4296  */
4297 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4298 {
4299 	int i, r = 0;
4300 
4301 	for (i = 0; i < adev->num_ip_blocks; i++) {
4302 		if (!adev->ip_blocks[i].status.valid)
4303 			continue;
4304 		if (adev->ip_blocks[i].status.hang &&
4305 		    adev->ip_blocks[i].version->funcs->soft_reset) {
4306 			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4307 			if (r)
4308 				return r;
4309 		}
4310 	}
4311 
4312 	return 0;
4313 }
4314 
4315 /**
4316  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4317  *
4318  * @adev: amdgpu_device pointer
4319  *
4320  * The list of all the hardware IPs that make up the asic is walked and the
4321  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4322  * handles any IP specific hardware or software state changes that are
4323  * necessary after the IP has been soft reset.
4324  * Returns 0 on success, negative error code on failure.
4325  */
4326 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4327 {
4328 	int i, r = 0;
4329 
4330 	for (i = 0; i < adev->num_ip_blocks; i++) {
4331 		if (!adev->ip_blocks[i].status.valid)
4332 			continue;
4333 		if (adev->ip_blocks[i].status.hang &&
4334 		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4335 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4336 		if (r)
4337 			return r;
4338 	}
4339 
4340 	return 0;
4341 }
4342 
4343 /**
4344  * amdgpu_device_recover_vram - Recover some VRAM contents
4345  *
4346  * @adev: amdgpu_device pointer
4347  *
4348  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4349  * restore things like GPUVM page tables after a GPU reset where
4350  * the contents of VRAM might be lost.
4351  *
4352  * Returns:
4353  * 0 on success, negative error code on failure.
4354  */
4355 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4356 {
4357 	struct dma_fence *fence = NULL, *next = NULL;
4358 	struct amdgpu_bo *shadow;
4359 	struct amdgpu_bo_vm *vmbo;
4360 	long r = 1, tmo;
4361 
4362 	if (amdgpu_sriov_runtime(adev))
4363 		tmo = msecs_to_jiffies(8000);
4364 	else
4365 		tmo = msecs_to_jiffies(100);
4366 
4367 	dev_info(adev->dev, "recover vram bo from shadow start\n");
4368 	mutex_lock(&adev->shadow_list_lock);
4369 	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4370 		shadow = &vmbo->bo;
4371 		/* No need to recover an evicted BO */
4372 		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4373 		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4374 		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4375 			continue;
4376 
4377 		r = amdgpu_bo_restore_shadow(shadow, &next);
4378 		if (r)
4379 			break;
4380 
4381 		if (fence) {
4382 			tmo = dma_fence_wait_timeout(fence, false, tmo);
4383 			dma_fence_put(fence);
4384 			fence = next;
4385 			if (tmo == 0) {
4386 				r = -ETIMEDOUT;
4387 				break;
4388 			} else if (tmo < 0) {
4389 				r = tmo;
4390 				break;
4391 			}
4392 		} else {
4393 			fence = next;
4394 		}
4395 	}
4396 	mutex_unlock(&adev->shadow_list_lock);
4397 
4398 	if (fence)
4399 		tmo = dma_fence_wait_timeout(fence, false, tmo);
4400 	dma_fence_put(fence);
4401 
4402 	if (r < 0 || tmo <= 0) {
4403 		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4404 		return -EIO;
4405 	}
4406 
4407 	dev_info(adev->dev, "recover vram bo from shadow done\n");
4408 	return 0;
4409 }
4410 
4411 
4412 /**
4413  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4414  *
4415  * @adev: amdgpu_device pointer
4416  * @from_hypervisor: request from hypervisor
4417  *
4418  * do VF FLR and reinitialize Asic
4419  * return 0 means succeeded otherwise failed
4420  */
4421 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4422 				     bool from_hypervisor)
4423 {
4424 	int r;
4425 	struct amdgpu_hive_info *hive = NULL;
4426 	int retry_limit = 0;
4427 
4428 retry:
4429 	amdgpu_amdkfd_pre_reset(adev);
4430 
4431 	if (from_hypervisor)
4432 		r = amdgpu_virt_request_full_gpu(adev, true);
4433 	else
4434 		r = amdgpu_virt_reset_gpu(adev);
4435 	if (r)
4436 		return r;
4437 
4438 	/* Resume IP prior to SMC */
4439 	r = amdgpu_device_ip_reinit_early_sriov(adev);
4440 	if (r)
4441 		goto error;
4442 
4443 	amdgpu_virt_init_data_exchange(adev);
4444 
4445 	r = amdgpu_device_fw_loading(adev);
4446 	if (r)
4447 		return r;
4448 
4449 	/* now we are okay to resume SMC/CP/SDMA */
4450 	r = amdgpu_device_ip_reinit_late_sriov(adev);
4451 	if (r)
4452 		goto error;
4453 
4454 	hive = amdgpu_get_xgmi_hive(adev);
4455 	/* Update PSP FW topology after reset */
4456 	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4457 		r = amdgpu_xgmi_update_topology(hive, adev);
4458 
4459 	if (hive)
4460 		amdgpu_put_xgmi_hive(hive);
4461 
4462 	if (!r) {
4463 		amdgpu_irq_gpu_reset_resume_helper(adev);
4464 		r = amdgpu_ib_ring_tests(adev);
4465 
4466 		amdgpu_amdkfd_post_reset(adev);
4467 	}
4468 
4469 error:
4470 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4471 		amdgpu_inc_vram_lost(adev);
4472 		r = amdgpu_device_recover_vram(adev);
4473 	}
4474 	amdgpu_virt_release_full_gpu(adev, true);
4475 
4476 	if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4477 		if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4478 			retry_limit++;
4479 			goto retry;
4480 		} else
4481 			DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4482 	}
4483 
4484 	return r;
4485 }
4486 
4487 /**
4488  * amdgpu_device_has_job_running - check if there is any job in mirror list
4489  *
4490  * @adev: amdgpu_device pointer
4491  *
4492  * check if there is any job in mirror list
4493  */
4494 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4495 {
4496 	int i;
4497 	struct drm_sched_job *job;
4498 
4499 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4500 		struct amdgpu_ring *ring = adev->rings[i];
4501 
4502 		if (!ring || !ring->sched.thread)
4503 			continue;
4504 
4505 		spin_lock(&ring->sched.job_list_lock);
4506 		job = list_first_entry_or_null(&ring->sched.pending_list,
4507 					       struct drm_sched_job, list);
4508 		spin_unlock(&ring->sched.job_list_lock);
4509 		if (job)
4510 			return true;
4511 	}
4512 	return false;
4513 }
4514 
4515 /**
4516  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4517  *
4518  * @adev: amdgpu_device pointer
4519  *
4520  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4521  * a hung GPU.
4522  */
4523 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4524 {
4525 
4526 	if (amdgpu_gpu_recovery == 0)
4527 		goto disabled;
4528 
4529 	if (!amdgpu_device_ip_check_soft_reset(adev)) {
4530 		dev_info(adev->dev,"Timeout, but no hardware hang detected.\n");
4531 		return false;
4532 	}
4533 
4534 	if (amdgpu_sriov_vf(adev))
4535 		return true;
4536 
4537 	if (amdgpu_gpu_recovery == -1) {
4538 		switch (adev->asic_type) {
4539 #ifdef CONFIG_DRM_AMDGPU_SI
4540 		case CHIP_VERDE:
4541 		case CHIP_TAHITI:
4542 		case CHIP_PITCAIRN:
4543 		case CHIP_OLAND:
4544 		case CHIP_HAINAN:
4545 #endif
4546 #ifdef CONFIG_DRM_AMDGPU_CIK
4547 		case CHIP_KAVERI:
4548 		case CHIP_KABINI:
4549 		case CHIP_MULLINS:
4550 #endif
4551 		case CHIP_CARRIZO:
4552 		case CHIP_STONEY:
4553 		case CHIP_CYAN_SKILLFISH:
4554 			goto disabled;
4555 		default:
4556 			break;
4557 		}
4558 	}
4559 
4560 	return true;
4561 
4562 disabled:
4563 		dev_info(adev->dev, "GPU recovery disabled.\n");
4564 		return false;
4565 }
4566 
4567 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4568 {
4569         u32 i;
4570         int ret = 0;
4571 
4572         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4573 
4574         dev_info(adev->dev, "GPU mode1 reset\n");
4575 
4576         /* disable BM */
4577         pci_clear_master(adev->pdev);
4578 
4579         amdgpu_device_cache_pci_state(adev->pdev);
4580 
4581         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4582                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4583                 ret = amdgpu_dpm_mode1_reset(adev);
4584         } else {
4585                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4586                 ret = psp_gpu_reset(adev);
4587         }
4588 
4589         if (ret)
4590                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4591 
4592         amdgpu_device_load_pci_state(adev->pdev);
4593 
4594         /* wait for asic to come out of reset */
4595         for (i = 0; i < adev->usec_timeout; i++) {
4596                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4597 
4598                 if (memsize != 0xffffffff)
4599                         break;
4600                 udelay(1);
4601         }
4602 
4603         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4604         return ret;
4605 }
4606 
4607 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4608 				 struct amdgpu_reset_context *reset_context)
4609 {
4610 	int i, r = 0;
4611 	struct amdgpu_job *job = NULL;
4612 	bool need_full_reset =
4613 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4614 
4615 	if (reset_context->reset_req_dev == adev)
4616 		job = reset_context->job;
4617 
4618 	if (amdgpu_sriov_vf(adev)) {
4619 		/* stop the data exchange thread */
4620 		amdgpu_virt_fini_data_exchange(adev);
4621 	}
4622 
4623 	amdgpu_fence_driver_isr_toggle(adev, true);
4624 
4625 	/* block all schedulers and reset given job's ring */
4626 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4627 		struct amdgpu_ring *ring = adev->rings[i];
4628 
4629 		if (!ring || !ring->sched.thread)
4630 			continue;
4631 
4632 		/*clear job fence from fence drv to avoid force_completion
4633 		 *leave NULL and vm flush fence in fence drv */
4634 		amdgpu_fence_driver_clear_job_fences(ring);
4635 
4636 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4637 		amdgpu_fence_driver_force_completion(ring);
4638 	}
4639 
4640 	amdgpu_fence_driver_isr_toggle(adev, false);
4641 
4642 	if (job && job->vm)
4643 		drm_sched_increase_karma(&job->base);
4644 
4645 	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4646 	/* If reset handler not implemented, continue; otherwise return */
4647 	if (r == -ENOSYS)
4648 		r = 0;
4649 	else
4650 		return r;
4651 
4652 	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4653 	if (!amdgpu_sriov_vf(adev)) {
4654 
4655 		if (!need_full_reset)
4656 			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4657 
4658 		if (!need_full_reset && amdgpu_gpu_recovery) {
4659 			amdgpu_device_ip_pre_soft_reset(adev);
4660 			r = amdgpu_device_ip_soft_reset(adev);
4661 			amdgpu_device_ip_post_soft_reset(adev);
4662 			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4663 				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4664 				need_full_reset = true;
4665 			}
4666 		}
4667 
4668 		if (need_full_reset)
4669 			r = amdgpu_device_ip_suspend(adev);
4670 		if (need_full_reset)
4671 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4672 		else
4673 			clear_bit(AMDGPU_NEED_FULL_RESET,
4674 				  &reset_context->flags);
4675 	}
4676 
4677 	return r;
4678 }
4679 
4680 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4681 {
4682 	int i;
4683 
4684 	lockdep_assert_held(&adev->reset_domain->sem);
4685 
4686 	for (i = 0; i < adev->num_regs; i++) {
4687 		adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4688 		trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4689 					     adev->reset_dump_reg_value[i]);
4690 	}
4691 
4692 	return 0;
4693 }
4694 
4695 #ifdef CONFIG_DEV_COREDUMP
4696 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4697 		size_t count, void *data, size_t datalen)
4698 {
4699 	struct drm_printer p;
4700 	struct amdgpu_device *adev = data;
4701 	struct drm_print_iterator iter;
4702 	int i;
4703 
4704 	iter.data = buffer;
4705 	iter.offset = 0;
4706 	iter.start = offset;
4707 	iter.remain = count;
4708 
4709 	p = drm_coredump_printer(&iter);
4710 
4711 	drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4712 	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4713 	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4714 	drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4715 	if (adev->reset_task_info.pid)
4716 		drm_printf(&p, "process_name: %s PID: %d\n",
4717 			   adev->reset_task_info.process_name,
4718 			   adev->reset_task_info.pid);
4719 
4720 	if (adev->reset_vram_lost)
4721 		drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4722 	if (adev->num_regs) {
4723 		drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
4724 
4725 		for (i = 0; i < adev->num_regs; i++)
4726 			drm_printf(&p, "0x%08x: 0x%08x\n",
4727 				   adev->reset_dump_reg_list[i],
4728 				   adev->reset_dump_reg_value[i]);
4729 	}
4730 
4731 	return count - iter.remain;
4732 }
4733 
4734 static void amdgpu_devcoredump_free(void *data)
4735 {
4736 }
4737 
4738 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4739 {
4740 	struct drm_device *dev = adev_to_drm(adev);
4741 
4742 	ktime_get_ts64(&adev->reset_time);
4743 	dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4744 		      amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4745 }
4746 #endif
4747 
4748 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4749 			 struct amdgpu_reset_context *reset_context)
4750 {
4751 	struct amdgpu_device *tmp_adev = NULL;
4752 	bool need_full_reset, skip_hw_reset, vram_lost = false;
4753 	int r = 0;
4754 	bool gpu_reset_for_dev_remove = 0;
4755 
4756 	/* Try reset handler method first */
4757 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4758 				    reset_list);
4759 	amdgpu_reset_reg_dumps(tmp_adev);
4760 
4761 	reset_context->reset_device_list = device_list_handle;
4762 	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4763 	/* If reset handler not implemented, continue; otherwise return */
4764 	if (r == -ENOSYS)
4765 		r = 0;
4766 	else
4767 		return r;
4768 
4769 	/* Reset handler not implemented, use the default method */
4770 	need_full_reset =
4771 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4772 	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4773 
4774 	gpu_reset_for_dev_remove =
4775 		test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
4776 			test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4777 
4778 	/*
4779 	 * ASIC reset has to be done on all XGMI hive nodes ASAP
4780 	 * to allow proper links negotiation in FW (within 1 sec)
4781 	 */
4782 	if (!skip_hw_reset && need_full_reset) {
4783 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4784 			/* For XGMI run all resets in parallel to speed up the process */
4785 			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4786 				tmp_adev->gmc.xgmi.pending_reset = false;
4787 				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4788 					r = -EALREADY;
4789 			} else
4790 				r = amdgpu_asic_reset(tmp_adev);
4791 
4792 			if (r) {
4793 				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4794 					 r, adev_to_drm(tmp_adev)->unique);
4795 				break;
4796 			}
4797 		}
4798 
4799 		/* For XGMI wait for all resets to complete before proceed */
4800 		if (!r) {
4801 			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4802 				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4803 					flush_work(&tmp_adev->xgmi_reset_work);
4804 					r = tmp_adev->asic_reset_res;
4805 					if (r)
4806 						break;
4807 				}
4808 			}
4809 		}
4810 	}
4811 
4812 	if (!r && amdgpu_ras_intr_triggered()) {
4813 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4814 			if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4815 			    tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4816 				tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4817 		}
4818 
4819 		amdgpu_ras_intr_cleared();
4820 	}
4821 
4822 	/* Since the mode1 reset affects base ip blocks, the
4823 	 * phase1 ip blocks need to be resumed. Otherwise there
4824 	 * will be a BIOS signature error and the psp bootloader
4825 	 * can't load kdb on the next amdgpu install.
4826 	 */
4827 	if (gpu_reset_for_dev_remove) {
4828 		list_for_each_entry(tmp_adev, device_list_handle, reset_list)
4829 			amdgpu_device_ip_resume_phase1(tmp_adev);
4830 
4831 		goto end;
4832 	}
4833 
4834 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4835 		if (need_full_reset) {
4836 			/* post card */
4837 			r = amdgpu_device_asic_init(tmp_adev);
4838 			if (r) {
4839 				dev_warn(tmp_adev->dev, "asic atom init failed!");
4840 			} else {
4841 				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4842 				r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4843 				if (r)
4844 					goto out;
4845 
4846 				r = amdgpu_device_ip_resume_phase1(tmp_adev);
4847 				if (r)
4848 					goto out;
4849 
4850 				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4851 #ifdef CONFIG_DEV_COREDUMP
4852 				tmp_adev->reset_vram_lost = vram_lost;
4853 				memset(&tmp_adev->reset_task_info, 0,
4854 						sizeof(tmp_adev->reset_task_info));
4855 				if (reset_context->job && reset_context->job->vm)
4856 					tmp_adev->reset_task_info =
4857 						reset_context->job->vm->task_info;
4858 				amdgpu_reset_capture_coredumpm(tmp_adev);
4859 #endif
4860 				if (vram_lost) {
4861 					DRM_INFO("VRAM is lost due to GPU reset!\n");
4862 					amdgpu_inc_vram_lost(tmp_adev);
4863 				}
4864 
4865 				r = amdgpu_device_fw_loading(tmp_adev);
4866 				if (r)
4867 					return r;
4868 
4869 				r = amdgpu_device_ip_resume_phase2(tmp_adev);
4870 				if (r)
4871 					goto out;
4872 
4873 				if (vram_lost)
4874 					amdgpu_device_fill_reset_magic(tmp_adev);
4875 
4876 				/*
4877 				 * Add this ASIC as tracked as reset was already
4878 				 * complete successfully.
4879 				 */
4880 				amdgpu_register_gpu_instance(tmp_adev);
4881 
4882 				if (!reset_context->hive &&
4883 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4884 					amdgpu_xgmi_add_device(tmp_adev);
4885 
4886 				r = amdgpu_device_ip_late_init(tmp_adev);
4887 				if (r)
4888 					goto out;
4889 
4890 				drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4891 
4892 				/*
4893 				 * The GPU enters bad state once faulty pages
4894 				 * by ECC has reached the threshold, and ras
4895 				 * recovery is scheduled next. So add one check
4896 				 * here to break recovery if it indeed exceeds
4897 				 * bad page threshold, and remind user to
4898 				 * retire this GPU or setting one bigger
4899 				 * bad_page_threshold value to fix this once
4900 				 * probing driver again.
4901 				 */
4902 				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4903 					/* must succeed. */
4904 					amdgpu_ras_resume(tmp_adev);
4905 				} else {
4906 					r = -EINVAL;
4907 					goto out;
4908 				}
4909 
4910 				/* Update PSP FW topology after reset */
4911 				if (reset_context->hive &&
4912 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4913 					r = amdgpu_xgmi_update_topology(
4914 						reset_context->hive, tmp_adev);
4915 			}
4916 		}
4917 
4918 out:
4919 		if (!r) {
4920 			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4921 			r = amdgpu_ib_ring_tests(tmp_adev);
4922 			if (r) {
4923 				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4924 				need_full_reset = true;
4925 				r = -EAGAIN;
4926 				goto end;
4927 			}
4928 		}
4929 
4930 		if (!r)
4931 			r = amdgpu_device_recover_vram(tmp_adev);
4932 		else
4933 			tmp_adev->asic_reset_res = r;
4934 	}
4935 
4936 end:
4937 	if (need_full_reset)
4938 		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4939 	else
4940 		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4941 	return r;
4942 }
4943 
4944 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
4945 {
4946 
4947 	switch (amdgpu_asic_reset_method(adev)) {
4948 	case AMD_RESET_METHOD_MODE1:
4949 		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4950 		break;
4951 	case AMD_RESET_METHOD_MODE2:
4952 		adev->mp1_state = PP_MP1_STATE_RESET;
4953 		break;
4954 	default:
4955 		adev->mp1_state = PP_MP1_STATE_NONE;
4956 		break;
4957 	}
4958 }
4959 
4960 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
4961 {
4962 	amdgpu_vf_error_trans_all(adev);
4963 	adev->mp1_state = PP_MP1_STATE_NONE;
4964 }
4965 
4966 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4967 {
4968 	struct pci_dev *p = NULL;
4969 
4970 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4971 			adev->pdev->bus->number, 1);
4972 	if (p) {
4973 		pm_runtime_enable(&(p->dev));
4974 		pm_runtime_resume(&(p->dev));
4975 	}
4976 }
4977 
4978 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4979 {
4980 	enum amd_reset_method reset_method;
4981 	struct pci_dev *p = NULL;
4982 	u64 expires;
4983 
4984 	/*
4985 	 * For now, only BACO and mode1 reset are confirmed
4986 	 * to suffer the audio issue without proper suspended.
4987 	 */
4988 	reset_method = amdgpu_asic_reset_method(adev);
4989 	if ((reset_method != AMD_RESET_METHOD_BACO) &&
4990 	     (reset_method != AMD_RESET_METHOD_MODE1))
4991 		return -EINVAL;
4992 
4993 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4994 			adev->pdev->bus->number, 1);
4995 	if (!p)
4996 		return -ENODEV;
4997 
4998 	expires = pm_runtime_autosuspend_expiration(&(p->dev));
4999 	if (!expires)
5000 		/*
5001 		 * If we cannot get the audio device autosuspend delay,
5002 		 * a fixed 4S interval will be used. Considering 3S is
5003 		 * the audio controller default autosuspend delay setting.
5004 		 * 4S used here is guaranteed to cover that.
5005 		 */
5006 		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5007 
5008 	while (!pm_runtime_status_suspended(&(p->dev))) {
5009 		if (!pm_runtime_suspend(&(p->dev)))
5010 			break;
5011 
5012 		if (expires < ktime_get_mono_fast_ns()) {
5013 			dev_warn(adev->dev, "failed to suspend display audio\n");
5014 			/* TODO: abort the succeeding gpu reset? */
5015 			return -ETIMEDOUT;
5016 		}
5017 	}
5018 
5019 	pm_runtime_disable(&(p->dev));
5020 
5021 	return 0;
5022 }
5023 
5024 static void amdgpu_device_recheck_guilty_jobs(
5025 	struct amdgpu_device *adev, struct list_head *device_list_handle,
5026 	struct amdgpu_reset_context *reset_context)
5027 {
5028 	int i, r = 0;
5029 
5030 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5031 		struct amdgpu_ring *ring = adev->rings[i];
5032 		int ret = 0;
5033 		struct drm_sched_job *s_job;
5034 
5035 		if (!ring || !ring->sched.thread)
5036 			continue;
5037 
5038 		s_job = list_first_entry_or_null(&ring->sched.pending_list,
5039 				struct drm_sched_job, list);
5040 		if (s_job == NULL)
5041 			continue;
5042 
5043 		/* clear job's guilty and depend the folowing step to decide the real one */
5044 		drm_sched_reset_karma(s_job);
5045 		drm_sched_resubmit_jobs_ext(&ring->sched, 1);
5046 
5047 		if (!s_job->s_fence->parent) {
5048 			DRM_WARN("Failed to get a HW fence for job!");
5049 			continue;
5050 		}
5051 
5052 		ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
5053 		if (ret == 0) { /* timeout */
5054 			DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
5055 						ring->sched.name, s_job->id);
5056 
5057 
5058 			amdgpu_fence_driver_isr_toggle(adev, true);
5059 
5060 			/* Clear this failed job from fence array */
5061 			amdgpu_fence_driver_clear_job_fences(ring);
5062 
5063 			amdgpu_fence_driver_isr_toggle(adev, false);
5064 
5065 			/* Since the job won't signal and we go for
5066 			 * another resubmit drop this parent pointer
5067 			 */
5068 			dma_fence_put(s_job->s_fence->parent);
5069 			s_job->s_fence->parent = NULL;
5070 
5071 			/* set guilty */
5072 			drm_sched_increase_karma(s_job);
5073 			amdgpu_reset_prepare_hwcontext(adev, reset_context);
5074 retry:
5075 			/* do hw reset */
5076 			if (amdgpu_sriov_vf(adev)) {
5077 				amdgpu_virt_fini_data_exchange(adev);
5078 				r = amdgpu_device_reset_sriov(adev, false);
5079 				if (r)
5080 					adev->asic_reset_res = r;
5081 			} else {
5082 				clear_bit(AMDGPU_SKIP_HW_RESET,
5083 					  &reset_context->flags);
5084 				r = amdgpu_do_asic_reset(device_list_handle,
5085 							 reset_context);
5086 				if (r && r == -EAGAIN)
5087 					goto retry;
5088 			}
5089 
5090 			/*
5091 			 * add reset counter so that the following
5092 			 * resubmitted job could flush vmid
5093 			 */
5094 			atomic_inc(&adev->gpu_reset_counter);
5095 			continue;
5096 		}
5097 
5098 		/* got the hw fence, signal finished fence */
5099 		atomic_dec(ring->sched.score);
5100 		dma_fence_get(&s_job->s_fence->finished);
5101 		dma_fence_signal(&s_job->s_fence->finished);
5102 		dma_fence_put(&s_job->s_fence->finished);
5103 
5104 		/* remove node from list and free the job */
5105 		spin_lock(&ring->sched.job_list_lock);
5106 		list_del_init(&s_job->list);
5107 		spin_unlock(&ring->sched.job_list_lock);
5108 		ring->sched.ops->free_job(s_job);
5109 	}
5110 }
5111 
5112 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5113 {
5114 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5115 
5116 #if defined(CONFIG_DEBUG_FS)
5117 	if (!amdgpu_sriov_vf(adev))
5118 		cancel_work(&adev->reset_work);
5119 #endif
5120 
5121 	if (adev->kfd.dev)
5122 		cancel_work(&adev->kfd.reset_work);
5123 
5124 	if (amdgpu_sriov_vf(adev))
5125 		cancel_work(&adev->virt.flr_work);
5126 
5127 	if (con && adev->ras_enabled)
5128 		cancel_work(&con->recovery_work);
5129 
5130 }
5131 
5132 
5133 /**
5134  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5135  *
5136  * @adev: amdgpu_device pointer
5137  * @job: which job trigger hang
5138  *
5139  * Attempt to reset the GPU if it has hung (all asics).
5140  * Attempt to do soft-reset or full-reset and reinitialize Asic
5141  * Returns 0 for success or an error on failure.
5142  */
5143 
5144 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5145 			      struct amdgpu_job *job,
5146 			      struct amdgpu_reset_context *reset_context)
5147 {
5148 	struct list_head device_list, *device_list_handle =  NULL;
5149 	bool job_signaled = false;
5150 	struct amdgpu_hive_info *hive = NULL;
5151 	struct amdgpu_device *tmp_adev = NULL;
5152 	int i, r = 0;
5153 	bool need_emergency_restart = false;
5154 	bool audio_suspended = false;
5155 	int tmp_vram_lost_counter;
5156 	bool gpu_reset_for_dev_remove = false;
5157 
5158 	gpu_reset_for_dev_remove =
5159 			test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5160 				test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5161 
5162 	/*
5163 	 * Special case: RAS triggered and full reset isn't supported
5164 	 */
5165 	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5166 
5167 	/*
5168 	 * Flush RAM to disk so that after reboot
5169 	 * the user can read log and see why the system rebooted.
5170 	 */
5171 	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5172 		DRM_WARN("Emergency reboot.");
5173 
5174 		ksys_sync_helper();
5175 		emergency_restart();
5176 	}
5177 
5178 	dev_info(adev->dev, "GPU %s begin!\n",
5179 		need_emergency_restart ? "jobs stop":"reset");
5180 
5181 	if (!amdgpu_sriov_vf(adev))
5182 		hive = amdgpu_get_xgmi_hive(adev);
5183 	if (hive)
5184 		mutex_lock(&hive->hive_lock);
5185 
5186 	reset_context->job = job;
5187 	reset_context->hive = hive;
5188 
5189 	/*
5190 	 * Build list of devices to reset.
5191 	 * In case we are in XGMI hive mode, resort the device list
5192 	 * to put adev in the 1st position.
5193 	 */
5194 	INIT_LIST_HEAD(&device_list);
5195 	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5196 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5197 			list_add_tail(&tmp_adev->reset_list, &device_list);
5198 			if (gpu_reset_for_dev_remove && adev->shutdown)
5199 				tmp_adev->shutdown = true;
5200 		}
5201 		if (!list_is_first(&adev->reset_list, &device_list))
5202 			list_rotate_to_front(&adev->reset_list, &device_list);
5203 		device_list_handle = &device_list;
5204 	} else {
5205 		list_add_tail(&adev->reset_list, &device_list);
5206 		device_list_handle = &device_list;
5207 	}
5208 
5209 	/* We need to lock reset domain only once both for XGMI and single device */
5210 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5211 				    reset_list);
5212 	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5213 
5214 	/* block all schedulers and reset given job's ring */
5215 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5216 
5217 		amdgpu_device_set_mp1_state(tmp_adev);
5218 
5219 		/*
5220 		 * Try to put the audio codec into suspend state
5221 		 * before gpu reset started.
5222 		 *
5223 		 * Due to the power domain of the graphics device
5224 		 * is shared with AZ power domain. Without this,
5225 		 * we may change the audio hardware from behind
5226 		 * the audio driver's back. That will trigger
5227 		 * some audio codec errors.
5228 		 */
5229 		if (!amdgpu_device_suspend_display_audio(tmp_adev))
5230 			audio_suspended = true;
5231 
5232 		amdgpu_ras_set_error_query_ready(tmp_adev, false);
5233 
5234 		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5235 
5236 		if (!amdgpu_sriov_vf(tmp_adev))
5237 			amdgpu_amdkfd_pre_reset(tmp_adev);
5238 
5239 		/*
5240 		 * Mark these ASICs to be reseted as untracked first
5241 		 * And add them back after reset completed
5242 		 */
5243 		amdgpu_unregister_gpu_instance(tmp_adev);
5244 
5245 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5246 
5247 		/* disable ras on ALL IPs */
5248 		if (!need_emergency_restart &&
5249 		      amdgpu_device_ip_need_full_reset(tmp_adev))
5250 			amdgpu_ras_suspend(tmp_adev);
5251 
5252 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5253 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5254 
5255 			if (!ring || !ring->sched.thread)
5256 				continue;
5257 
5258 			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5259 
5260 			if (need_emergency_restart)
5261 				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5262 		}
5263 		atomic_inc(&tmp_adev->gpu_reset_counter);
5264 	}
5265 
5266 	if (need_emergency_restart)
5267 		goto skip_sched_resume;
5268 
5269 	/*
5270 	 * Must check guilty signal here since after this point all old
5271 	 * HW fences are force signaled.
5272 	 *
5273 	 * job->base holds a reference to parent fence
5274 	 */
5275 	if (job && dma_fence_is_signaled(&job->hw_fence)) {
5276 		job_signaled = true;
5277 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5278 		goto skip_hw_reset;
5279 	}
5280 
5281 retry:	/* Rest of adevs pre asic reset from XGMI hive. */
5282 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5283 		if (gpu_reset_for_dev_remove) {
5284 			/* Workaroud for ASICs need to disable SMC first */
5285 			amdgpu_device_smu_fini_early(tmp_adev);
5286 		}
5287 		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5288 		/*TODO Should we stop ?*/
5289 		if (r) {
5290 			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5291 				  r, adev_to_drm(tmp_adev)->unique);
5292 			tmp_adev->asic_reset_res = r;
5293 		}
5294 
5295 		/*
5296 		 * Drop all pending non scheduler resets. Scheduler resets
5297 		 * were already dropped during drm_sched_stop
5298 		 */
5299 		amdgpu_device_stop_pending_resets(tmp_adev);
5300 	}
5301 
5302 	tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5303 	/* Actual ASIC resets if needed.*/
5304 	/* Host driver will handle XGMI hive reset for SRIOV */
5305 	if (amdgpu_sriov_vf(adev)) {
5306 		r = amdgpu_device_reset_sriov(adev, job ? false : true);
5307 		if (r)
5308 			adev->asic_reset_res = r;
5309 
5310 		/* Aldebaran supports ras in SRIOV, so need resume ras during reset */
5311 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5312 			amdgpu_ras_resume(adev);
5313 	} else {
5314 		r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5315 		if (r && r == -EAGAIN) {
5316 			set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags);
5317 			adev->asic_reset_res = 0;
5318 			goto retry;
5319 		}
5320 
5321 		if (!r && gpu_reset_for_dev_remove)
5322 			goto recover_end;
5323 	}
5324 
5325 skip_hw_reset:
5326 
5327 	/* Post ASIC reset for all devs .*/
5328 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5329 
5330 		/*
5331 		 * Sometimes a later bad compute job can block a good gfx job as gfx
5332 		 * and compute ring share internal GC HW mutually. We add an additional
5333 		 * guilty jobs recheck step to find the real guilty job, it synchronously
5334 		 * submits and pends for the first job being signaled. If it gets timeout,
5335 		 * we identify it as a real guilty job.
5336 		 */
5337 		if (amdgpu_gpu_recovery == 2 &&
5338 			!(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5339 			amdgpu_device_recheck_guilty_jobs(
5340 				tmp_adev, device_list_handle, reset_context);
5341 
5342 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5343 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5344 
5345 			if (!ring || !ring->sched.thread)
5346 				continue;
5347 
5348 			/* No point to resubmit jobs if we didn't HW reset*/
5349 			if (!tmp_adev->asic_reset_res && !job_signaled)
5350 				drm_sched_resubmit_jobs(&ring->sched);
5351 
5352 			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5353 		}
5354 
5355 		if (adev->enable_mes)
5356 			amdgpu_mes_self_test(tmp_adev);
5357 
5358 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5359 			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5360 		}
5361 
5362 		if (tmp_adev->asic_reset_res)
5363 			r = tmp_adev->asic_reset_res;
5364 
5365 		tmp_adev->asic_reset_res = 0;
5366 
5367 		if (r) {
5368 			/* bad news, how to tell it to userspace ? */
5369 			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5370 			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5371 		} else {
5372 			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5373 			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5374 				DRM_WARN("smart shift update failed\n");
5375 		}
5376 	}
5377 
5378 skip_sched_resume:
5379 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5380 		/* unlock kfd: SRIOV would do it separately */
5381 		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5382 			amdgpu_amdkfd_post_reset(tmp_adev);
5383 
5384 		/* kfd_post_reset will do nothing if kfd device is not initialized,
5385 		 * need to bring up kfd here if it's not be initialized before
5386 		 */
5387 		if (!adev->kfd.init_complete)
5388 			amdgpu_amdkfd_device_init(adev);
5389 
5390 		if (audio_suspended)
5391 			amdgpu_device_resume_display_audio(tmp_adev);
5392 
5393 		amdgpu_device_unset_mp1_state(tmp_adev);
5394 	}
5395 
5396 recover_end:
5397 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5398 					    reset_list);
5399 	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5400 
5401 	if (hive) {
5402 		mutex_unlock(&hive->hive_lock);
5403 		amdgpu_put_xgmi_hive(hive);
5404 	}
5405 
5406 	if (r)
5407 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5408 
5409 	atomic_set(&adev->reset_domain->reset_res, r);
5410 	return r;
5411 }
5412 
5413 /**
5414  * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
5415  *
5416  * @adev: amdgpu_device pointer
5417  *
5418  * Fetchs and stores in the driver the PCIE capabilities (gen speed
5419  * and lanes) of the slot the device is in. Handles APUs and
5420  * virtualized environments where PCIE config space may not be available.
5421  */
5422 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5423 {
5424 	struct pci_dev *pdev;
5425 	enum pci_bus_speed speed_cap, platform_speed_cap;
5426 	enum pcie_link_width platform_link_width;
5427 
5428 	if (amdgpu_pcie_gen_cap)
5429 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5430 
5431 	if (amdgpu_pcie_lane_cap)
5432 		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5433 
5434 	/* covers APUs as well */
5435 	if (pci_is_root_bus(adev->pdev->bus)) {
5436 		if (adev->pm.pcie_gen_mask == 0)
5437 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5438 		if (adev->pm.pcie_mlw_mask == 0)
5439 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5440 		return;
5441 	}
5442 
5443 	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5444 		return;
5445 
5446 	pcie_bandwidth_available(adev->pdev, NULL,
5447 				 &platform_speed_cap, &platform_link_width);
5448 
5449 	if (adev->pm.pcie_gen_mask == 0) {
5450 		/* asic caps */
5451 		pdev = adev->pdev;
5452 		speed_cap = pcie_get_speed_cap(pdev);
5453 		if (speed_cap == PCI_SPEED_UNKNOWN) {
5454 			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5455 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5456 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5457 		} else {
5458 			if (speed_cap == PCIE_SPEED_32_0GT)
5459 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5460 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5461 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5462 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5463 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5464 			else if (speed_cap == PCIE_SPEED_16_0GT)
5465 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5466 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5467 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5468 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5469 			else if (speed_cap == PCIE_SPEED_8_0GT)
5470 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5471 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5472 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5473 			else if (speed_cap == PCIE_SPEED_5_0GT)
5474 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5475 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5476 			else
5477 				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5478 		}
5479 		/* platform caps */
5480 		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5481 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5482 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5483 		} else {
5484 			if (platform_speed_cap == PCIE_SPEED_32_0GT)
5485 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5486 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5487 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5488 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5489 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5490 			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5491 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5492 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5493 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5494 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5495 			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5496 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5497 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5498 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5499 			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5500 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5501 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5502 			else
5503 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5504 
5505 		}
5506 	}
5507 	if (adev->pm.pcie_mlw_mask == 0) {
5508 		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5509 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5510 		} else {
5511 			switch (platform_link_width) {
5512 			case PCIE_LNK_X32:
5513 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5514 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5515 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5516 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5517 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5518 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5519 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5520 				break;
5521 			case PCIE_LNK_X16:
5522 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5523 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5524 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5525 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5526 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5527 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5528 				break;
5529 			case PCIE_LNK_X12:
5530 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5531 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5532 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5533 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5534 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5535 				break;
5536 			case PCIE_LNK_X8:
5537 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5538 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5539 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5540 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5541 				break;
5542 			case PCIE_LNK_X4:
5543 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5544 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5545 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5546 				break;
5547 			case PCIE_LNK_X2:
5548 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5549 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5550 				break;
5551 			case PCIE_LNK_X1:
5552 				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5553 				break;
5554 			default:
5555 				break;
5556 			}
5557 		}
5558 	}
5559 }
5560 
5561 /**
5562  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5563  *
5564  * @adev: amdgpu_device pointer
5565  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5566  *
5567  * Return true if @peer_adev can access (DMA) @adev through the PCIe
5568  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5569  * @peer_adev.
5570  */
5571 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5572 				      struct amdgpu_device *peer_adev)
5573 {
5574 #ifdef CONFIG_HSA_AMD_P2P
5575 	uint64_t address_mask = peer_adev->dev->dma_mask ?
5576 		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5577 	resource_size_t aper_limit =
5578 		adev->gmc.aper_base + adev->gmc.aper_size - 1;
5579 	bool p2p_access =
5580 		!adev->gmc.xgmi.connected_to_cpu &&
5581 		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5582 
5583 	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5584 		adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5585 		!(adev->gmc.aper_base & address_mask ||
5586 		  aper_limit & address_mask));
5587 #else
5588 	return false;
5589 #endif
5590 }
5591 
5592 int amdgpu_device_baco_enter(struct drm_device *dev)
5593 {
5594 	struct amdgpu_device *adev = drm_to_adev(dev);
5595 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5596 
5597 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5598 		return -ENOTSUPP;
5599 
5600 	if (ras && adev->ras_enabled &&
5601 	    adev->nbio.funcs->enable_doorbell_interrupt)
5602 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5603 
5604 	return amdgpu_dpm_baco_enter(adev);
5605 }
5606 
5607 int amdgpu_device_baco_exit(struct drm_device *dev)
5608 {
5609 	struct amdgpu_device *adev = drm_to_adev(dev);
5610 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5611 	int ret = 0;
5612 
5613 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5614 		return -ENOTSUPP;
5615 
5616 	ret = amdgpu_dpm_baco_exit(adev);
5617 	if (ret)
5618 		return ret;
5619 
5620 	if (ras && adev->ras_enabled &&
5621 	    adev->nbio.funcs->enable_doorbell_interrupt)
5622 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5623 
5624 	if (amdgpu_passthrough(adev) &&
5625 	    adev->nbio.funcs->clear_doorbell_interrupt)
5626 		adev->nbio.funcs->clear_doorbell_interrupt(adev);
5627 
5628 	return 0;
5629 }
5630 
5631 /**
5632  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5633  * @pdev: PCI device struct
5634  * @state: PCI channel state
5635  *
5636  * Description: Called when a PCI error is detected.
5637  *
5638  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5639  */
5640 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5641 {
5642 	struct drm_device *dev = pci_get_drvdata(pdev);
5643 	struct amdgpu_device *adev = drm_to_adev(dev);
5644 	int i;
5645 
5646 	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5647 
5648 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
5649 		DRM_WARN("No support for XGMI hive yet...");
5650 		return PCI_ERS_RESULT_DISCONNECT;
5651 	}
5652 
5653 	adev->pci_channel_state = state;
5654 
5655 	switch (state) {
5656 	case pci_channel_io_normal:
5657 		return PCI_ERS_RESULT_CAN_RECOVER;
5658 	/* Fatal error, prepare for slot reset */
5659 	case pci_channel_io_frozen:
5660 		/*
5661 		 * Locking adev->reset_domain->sem will prevent any external access
5662 		 * to GPU during PCI error recovery
5663 		 */
5664 		amdgpu_device_lock_reset_domain(adev->reset_domain);
5665 		amdgpu_device_set_mp1_state(adev);
5666 
5667 		/*
5668 		 * Block any work scheduling as we do for regular GPU reset
5669 		 * for the duration of the recovery
5670 		 */
5671 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5672 			struct amdgpu_ring *ring = adev->rings[i];
5673 
5674 			if (!ring || !ring->sched.thread)
5675 				continue;
5676 
5677 			drm_sched_stop(&ring->sched, NULL);
5678 		}
5679 		atomic_inc(&adev->gpu_reset_counter);
5680 		return PCI_ERS_RESULT_NEED_RESET;
5681 	case pci_channel_io_perm_failure:
5682 		/* Permanent error, prepare for device removal */
5683 		return PCI_ERS_RESULT_DISCONNECT;
5684 	}
5685 
5686 	return PCI_ERS_RESULT_NEED_RESET;
5687 }
5688 
5689 /**
5690  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5691  * @pdev: pointer to PCI device
5692  */
5693 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5694 {
5695 
5696 	DRM_INFO("PCI error: mmio enabled callback!!\n");
5697 
5698 	/* TODO - dump whatever for debugging purposes */
5699 
5700 	/* This called only if amdgpu_pci_error_detected returns
5701 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5702 	 * works, no need to reset slot.
5703 	 */
5704 
5705 	return PCI_ERS_RESULT_RECOVERED;
5706 }
5707 
5708 /**
5709  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5710  * @pdev: PCI device struct
5711  *
5712  * Description: This routine is called by the pci error recovery
5713  * code after the PCI slot has been reset, just before we
5714  * should resume normal operations.
5715  */
5716 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5717 {
5718 	struct drm_device *dev = pci_get_drvdata(pdev);
5719 	struct amdgpu_device *adev = drm_to_adev(dev);
5720 	int r, i;
5721 	struct amdgpu_reset_context reset_context;
5722 	u32 memsize;
5723 	struct list_head device_list;
5724 
5725 	DRM_INFO("PCI error: slot reset callback!!\n");
5726 
5727 	memset(&reset_context, 0, sizeof(reset_context));
5728 
5729 	INIT_LIST_HEAD(&device_list);
5730 	list_add_tail(&adev->reset_list, &device_list);
5731 
5732 	/* wait for asic to come out of reset */
5733 	msleep(500);
5734 
5735 	/* Restore PCI confspace */
5736 	amdgpu_device_load_pci_state(pdev);
5737 
5738 	/* confirm  ASIC came out of reset */
5739 	for (i = 0; i < adev->usec_timeout; i++) {
5740 		memsize = amdgpu_asic_get_config_memsize(adev);
5741 
5742 		if (memsize != 0xffffffff)
5743 			break;
5744 		udelay(1);
5745 	}
5746 	if (memsize == 0xffffffff) {
5747 		r = -ETIME;
5748 		goto out;
5749 	}
5750 
5751 	reset_context.method = AMD_RESET_METHOD_NONE;
5752 	reset_context.reset_req_dev = adev;
5753 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5754 	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5755 	set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
5756 
5757 	adev->no_hw_access = true;
5758 	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5759 	adev->no_hw_access = false;
5760 	if (r)
5761 		goto out;
5762 
5763 	r = amdgpu_do_asic_reset(&device_list, &reset_context);
5764 
5765 out:
5766 	if (!r) {
5767 		if (amdgpu_device_cache_pci_state(adev->pdev))
5768 			pci_restore_state(adev->pdev);
5769 
5770 		DRM_INFO("PCIe error recovery succeeded\n");
5771 	} else {
5772 		DRM_ERROR("PCIe error recovery failed, err:%d", r);
5773 		amdgpu_device_unset_mp1_state(adev);
5774 		amdgpu_device_unlock_reset_domain(adev->reset_domain);
5775 	}
5776 
5777 	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5778 }
5779 
5780 /**
5781  * amdgpu_pci_resume() - resume normal ops after PCI reset
5782  * @pdev: pointer to PCI device
5783  *
5784  * Called when the error recovery driver tells us that its
5785  * OK to resume normal operation.
5786  */
5787 void amdgpu_pci_resume(struct pci_dev *pdev)
5788 {
5789 	struct drm_device *dev = pci_get_drvdata(pdev);
5790 	struct amdgpu_device *adev = drm_to_adev(dev);
5791 	int i;
5792 
5793 
5794 	DRM_INFO("PCI error: resume callback!!\n");
5795 
5796 	/* Only continue execution for the case of pci_channel_io_frozen */
5797 	if (adev->pci_channel_state != pci_channel_io_frozen)
5798 		return;
5799 
5800 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5801 		struct amdgpu_ring *ring = adev->rings[i];
5802 
5803 		if (!ring || !ring->sched.thread)
5804 			continue;
5805 
5806 
5807 		drm_sched_resubmit_jobs(&ring->sched);
5808 		drm_sched_start(&ring->sched, true);
5809 	}
5810 
5811 	amdgpu_device_unset_mp1_state(adev);
5812 	amdgpu_device_unlock_reset_domain(adev->reset_domain);
5813 }
5814 
5815 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5816 {
5817 	struct drm_device *dev = pci_get_drvdata(pdev);
5818 	struct amdgpu_device *adev = drm_to_adev(dev);
5819 	int r;
5820 
5821 	r = pci_save_state(pdev);
5822 	if (!r) {
5823 		kfree(adev->pci_state);
5824 
5825 		adev->pci_state = pci_store_saved_state(pdev);
5826 
5827 		if (!adev->pci_state) {
5828 			DRM_ERROR("Failed to store PCI saved state");
5829 			return false;
5830 		}
5831 	} else {
5832 		DRM_WARN("Failed to save PCI state, err:%d\n", r);
5833 		return false;
5834 	}
5835 
5836 	return true;
5837 }
5838 
5839 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5840 {
5841 	struct drm_device *dev = pci_get_drvdata(pdev);
5842 	struct amdgpu_device *adev = drm_to_adev(dev);
5843 	int r;
5844 
5845 	if (!adev->pci_state)
5846 		return false;
5847 
5848 	r = pci_load_saved_state(pdev, adev->pci_state);
5849 
5850 	if (!r) {
5851 		pci_restore_state(pdev);
5852 	} else {
5853 		DRM_WARN("Failed to load PCI state, err:%d\n", r);
5854 		return false;
5855 	}
5856 
5857 	return true;
5858 }
5859 
5860 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5861 		struct amdgpu_ring *ring)
5862 {
5863 #ifdef CONFIG_X86_64
5864 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5865 		return;
5866 #endif
5867 	if (adev->gmc.xgmi.connected_to_cpu)
5868 		return;
5869 
5870 	if (ring && ring->funcs->emit_hdp_flush)
5871 		amdgpu_ring_emit_hdp_flush(ring);
5872 	else
5873 		amdgpu_asic_flush_hdp(adev, ring);
5874 }
5875 
5876 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5877 		struct amdgpu_ring *ring)
5878 {
5879 #ifdef CONFIG_X86_64
5880 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5881 		return;
5882 #endif
5883 	if (adev->gmc.xgmi.connected_to_cpu)
5884 		return;
5885 
5886 	amdgpu_asic_invalidate_hdp(adev, ring);
5887 }
5888 
5889 int amdgpu_in_reset(struct amdgpu_device *adev)
5890 {
5891 	return atomic_read(&adev->reset_domain->in_gpu_reset);
5892 	}
5893 
5894 /**
5895  * amdgpu_device_halt() - bring hardware to some kind of halt state
5896  *
5897  * @adev: amdgpu_device pointer
5898  *
5899  * Bring hardware to some kind of halt state so that no one can touch it
5900  * any more. It will help to maintain error context when error occurred.
5901  * Compare to a simple hang, the system will keep stable at least for SSH
5902  * access. Then it should be trivial to inspect the hardware state and
5903  * see what's going on. Implemented as following:
5904  *
5905  * 1. drm_dev_unplug() makes device inaccessible to user space(IOCTLs, etc),
5906  *    clears all CPU mappings to device, disallows remappings through page faults
5907  * 2. amdgpu_irq_disable_all() disables all interrupts
5908  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5909  * 4. set adev->no_hw_access to avoid potential crashes after setp 5
5910  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5911  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5912  *    flush any in flight DMA operations
5913  */
5914 void amdgpu_device_halt(struct amdgpu_device *adev)
5915 {
5916 	struct pci_dev *pdev = adev->pdev;
5917 	struct drm_device *ddev = adev_to_drm(adev);
5918 
5919 	drm_dev_unplug(ddev);
5920 
5921 	amdgpu_irq_disable_all(adev);
5922 
5923 	amdgpu_fence_driver_hw_fini(adev);
5924 
5925 	adev->no_hw_access = true;
5926 
5927 	amdgpu_device_unmap_mmio(adev);
5928 
5929 	pci_disable_device(pdev);
5930 	pci_wait_for_pending_transaction(pdev);
5931 }
5932 
5933 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5934 				u32 reg)
5935 {
5936 	unsigned long flags, address, data;
5937 	u32 r;
5938 
5939 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5940 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5941 
5942 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5943 	WREG32(address, reg * 4);
5944 	(void)RREG32(address);
5945 	r = RREG32(data);
5946 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5947 	return r;
5948 }
5949 
5950 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5951 				u32 reg, u32 v)
5952 {
5953 	unsigned long flags, address, data;
5954 
5955 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5956 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5957 
5958 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5959 	WREG32(address, reg * 4);
5960 	(void)RREG32(address);
5961 	WREG32(data, v);
5962 	(void)RREG32(data);
5963 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5964 }
5965 
5966 /**
5967  * amdgpu_device_switch_gang - switch to a new gang
5968  * @adev: amdgpu_device pointer
5969  * @gang: the gang to switch to
5970  *
5971  * Try to switch to a new gang.
5972  * Returns: NULL if we switched to the new gang or a reference to the current
5973  * gang leader.
5974  */
5975 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
5976 					    struct dma_fence *gang)
5977 {
5978 	struct dma_fence *old = NULL;
5979 
5980 	do {
5981 		dma_fence_put(old);
5982 		rcu_read_lock();
5983 		old = dma_fence_get_rcu_safe(&adev->gang_submit);
5984 		rcu_read_unlock();
5985 
5986 		if (old == gang)
5987 			break;
5988 
5989 		if (!dma_fence_is_signaled(old))
5990 			return old;
5991 
5992 	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
5993 			 old, gang) != old);
5994 
5995 	dma_fence_put(old);
5996 	return NULL;
5997 }
5998