xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (revision 56ea353ea49ad21dd4c14e7baa235493ec27e766)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 #include <linux/devcoredump.h>
36 #include <generated/utsrelease.h>
37 #include <linux/pci-p2pdma.h>
38 
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_probe_helper.h>
41 #include <drm/amdgpu_drm.h>
42 #include <linux/vgaarb.h>
43 #include <linux/vga_switcheroo.h>
44 #include <linux/efi.h>
45 #include "amdgpu.h"
46 #include "amdgpu_trace.h"
47 #include "amdgpu_i2c.h"
48 #include "atom.h"
49 #include "amdgpu_atombios.h"
50 #include "amdgpu_atomfirmware.h"
51 #include "amd_pcie.h"
52 #ifdef CONFIG_DRM_AMDGPU_SI
53 #include "si.h"
54 #endif
55 #ifdef CONFIG_DRM_AMDGPU_CIK
56 #include "cik.h"
57 #endif
58 #include "vi.h"
59 #include "soc15.h"
60 #include "nv.h"
61 #include "bif/bif_4_1_d.h"
62 #include <linux/firmware.h>
63 #include "amdgpu_vf_error.h"
64 
65 #include "amdgpu_amdkfd.h"
66 #include "amdgpu_pm.h"
67 
68 #include "amdgpu_xgmi.h"
69 #include "amdgpu_ras.h"
70 #include "amdgpu_pmu.h"
71 #include "amdgpu_fru_eeprom.h"
72 #include "amdgpu_reset.h"
73 
74 #include <linux/suspend.h>
75 #include <drm/task_barrier.h>
76 #include <linux/pm_runtime.h>
77 
78 #include <drm/drm_drv.h>
79 
80 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
87 
88 #define AMDGPU_RESUME_MS		2000
89 #define AMDGPU_MAX_RETRY_LIMIT		2
90 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
91 
92 const char *amdgpu_asic_name[] = {
93 	"TAHITI",
94 	"PITCAIRN",
95 	"VERDE",
96 	"OLAND",
97 	"HAINAN",
98 	"BONAIRE",
99 	"KAVERI",
100 	"KABINI",
101 	"HAWAII",
102 	"MULLINS",
103 	"TOPAZ",
104 	"TONGA",
105 	"FIJI",
106 	"CARRIZO",
107 	"STONEY",
108 	"POLARIS10",
109 	"POLARIS11",
110 	"POLARIS12",
111 	"VEGAM",
112 	"VEGA10",
113 	"VEGA12",
114 	"VEGA20",
115 	"RAVEN",
116 	"ARCTURUS",
117 	"RENOIR",
118 	"ALDEBARAN",
119 	"NAVI10",
120 	"CYAN_SKILLFISH",
121 	"NAVI14",
122 	"NAVI12",
123 	"SIENNA_CICHLID",
124 	"NAVY_FLOUNDER",
125 	"VANGOGH",
126 	"DIMGREY_CAVEFISH",
127 	"BEIGE_GOBY",
128 	"YELLOW_CARP",
129 	"IP DISCOVERY",
130 	"LAST",
131 };
132 
133 /**
134  * DOC: pcie_replay_count
135  *
136  * The amdgpu driver provides a sysfs API for reporting the total number
137  * of PCIe replays (NAKs)
138  * The file pcie_replay_count is used for this and returns the total
139  * number of replays as a sum of the NAKs generated and NAKs received
140  */
141 
142 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
143 		struct device_attribute *attr, char *buf)
144 {
145 	struct drm_device *ddev = dev_get_drvdata(dev);
146 	struct amdgpu_device *adev = drm_to_adev(ddev);
147 	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
148 
149 	return sysfs_emit(buf, "%llu\n", cnt);
150 }
151 
152 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
153 		amdgpu_device_get_pcie_replay_count, NULL);
154 
155 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
156 
157 /**
158  * DOC: product_name
159  *
160  * The amdgpu driver provides a sysfs API for reporting the product name
161  * for the device
162  * The file serial_number is used for this and returns the product name
163  * as returned from the FRU.
164  * NOTE: This is only available for certain server cards
165  */
166 
167 static ssize_t amdgpu_device_get_product_name(struct device *dev,
168 		struct device_attribute *attr, char *buf)
169 {
170 	struct drm_device *ddev = dev_get_drvdata(dev);
171 	struct amdgpu_device *adev = drm_to_adev(ddev);
172 
173 	return sysfs_emit(buf, "%s\n", adev->product_name);
174 }
175 
176 static DEVICE_ATTR(product_name, S_IRUGO,
177 		amdgpu_device_get_product_name, NULL);
178 
179 /**
180  * DOC: product_number
181  *
182  * The amdgpu driver provides a sysfs API for reporting the part number
183  * for the device
184  * The file serial_number is used for this and returns the part number
185  * as returned from the FRU.
186  * NOTE: This is only available for certain server cards
187  */
188 
189 static ssize_t amdgpu_device_get_product_number(struct device *dev,
190 		struct device_attribute *attr, char *buf)
191 {
192 	struct drm_device *ddev = dev_get_drvdata(dev);
193 	struct amdgpu_device *adev = drm_to_adev(ddev);
194 
195 	return sysfs_emit(buf, "%s\n", adev->product_number);
196 }
197 
198 static DEVICE_ATTR(product_number, S_IRUGO,
199 		amdgpu_device_get_product_number, NULL);
200 
201 /**
202  * DOC: serial_number
203  *
204  * The amdgpu driver provides a sysfs API for reporting the serial number
205  * for the device
206  * The file serial_number is used for this and returns the serial number
207  * as returned from the FRU.
208  * NOTE: This is only available for certain server cards
209  */
210 
211 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
212 		struct device_attribute *attr, char *buf)
213 {
214 	struct drm_device *ddev = dev_get_drvdata(dev);
215 	struct amdgpu_device *adev = drm_to_adev(ddev);
216 
217 	return sysfs_emit(buf, "%s\n", adev->serial);
218 }
219 
220 static DEVICE_ATTR(serial_number, S_IRUGO,
221 		amdgpu_device_get_serial_number, NULL);
222 
223 /**
224  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
225  *
226  * @dev: drm_device pointer
227  *
228  * Returns true if the device is a dGPU with ATPX power control,
229  * otherwise return false.
230  */
231 bool amdgpu_device_supports_px(struct drm_device *dev)
232 {
233 	struct amdgpu_device *adev = drm_to_adev(dev);
234 
235 	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
236 		return true;
237 	return false;
238 }
239 
240 /**
241  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
242  *
243  * @dev: drm_device pointer
244  *
245  * Returns true if the device is a dGPU with ACPI power control,
246  * otherwise return false.
247  */
248 bool amdgpu_device_supports_boco(struct drm_device *dev)
249 {
250 	struct amdgpu_device *adev = drm_to_adev(dev);
251 
252 	if (adev->has_pr3 ||
253 	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
254 		return true;
255 	return false;
256 }
257 
258 /**
259  * amdgpu_device_supports_baco - Does the device support BACO
260  *
261  * @dev: drm_device pointer
262  *
263  * Returns true if the device supporte BACO,
264  * otherwise return false.
265  */
266 bool amdgpu_device_supports_baco(struct drm_device *dev)
267 {
268 	struct amdgpu_device *adev = drm_to_adev(dev);
269 
270 	return amdgpu_asic_supports_baco(adev);
271 }
272 
273 /**
274  * amdgpu_device_supports_smart_shift - Is the device dGPU with
275  * smart shift support
276  *
277  * @dev: drm_device pointer
278  *
279  * Returns true if the device is a dGPU with Smart Shift support,
280  * otherwise returns false.
281  */
282 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
283 {
284 	return (amdgpu_device_supports_boco(dev) &&
285 		amdgpu_acpi_is_power_shift_control_supported());
286 }
287 
288 /*
289  * VRAM access helper functions
290  */
291 
292 /**
293  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
294  *
295  * @adev: amdgpu_device pointer
296  * @pos: offset of the buffer in vram
297  * @buf: virtual address of the buffer in system memory
298  * @size: read/write size, sizeof(@buf) must > @size
299  * @write: true - write to vram, otherwise - read from vram
300  */
301 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
302 			     void *buf, size_t size, bool write)
303 {
304 	unsigned long flags;
305 	uint32_t hi = ~0, tmp = 0;
306 	uint32_t *data = buf;
307 	uint64_t last;
308 	int idx;
309 
310 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
311 		return;
312 
313 	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
314 
315 	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
316 	for (last = pos + size; pos < last; pos += 4) {
317 		tmp = pos >> 31;
318 
319 		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
320 		if (tmp != hi) {
321 			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
322 			hi = tmp;
323 		}
324 		if (write)
325 			WREG32_NO_KIQ(mmMM_DATA, *data++);
326 		else
327 			*data++ = RREG32_NO_KIQ(mmMM_DATA);
328 	}
329 
330 	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
331 	drm_dev_exit(idx);
332 }
333 
334 /**
335  * amdgpu_device_aper_access - access vram by vram aperature
336  *
337  * @adev: amdgpu_device pointer
338  * @pos: offset of the buffer in vram
339  * @buf: virtual address of the buffer in system memory
340  * @size: read/write size, sizeof(@buf) must > @size
341  * @write: true - write to vram, otherwise - read from vram
342  *
343  * The return value means how many bytes have been transferred.
344  */
345 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
346 				 void *buf, size_t size, bool write)
347 {
348 #ifdef CONFIG_64BIT
349 	void __iomem *addr;
350 	size_t count = 0;
351 	uint64_t last;
352 
353 	if (!adev->mman.aper_base_kaddr)
354 		return 0;
355 
356 	last = min(pos + size, adev->gmc.visible_vram_size);
357 	if (last > pos) {
358 		addr = adev->mman.aper_base_kaddr + pos;
359 		count = last - pos;
360 
361 		if (write) {
362 			memcpy_toio(addr, buf, count);
363 			mb();
364 			amdgpu_device_flush_hdp(adev, NULL);
365 		} else {
366 			amdgpu_device_invalidate_hdp(adev, NULL);
367 			mb();
368 			memcpy_fromio(buf, addr, count);
369 		}
370 
371 	}
372 
373 	return count;
374 #else
375 	return 0;
376 #endif
377 }
378 
379 /**
380  * amdgpu_device_vram_access - read/write a buffer in vram
381  *
382  * @adev: amdgpu_device pointer
383  * @pos: offset of the buffer in vram
384  * @buf: virtual address of the buffer in system memory
385  * @size: read/write size, sizeof(@buf) must > @size
386  * @write: true - write to vram, otherwise - read from vram
387  */
388 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
389 			       void *buf, size_t size, bool write)
390 {
391 	size_t count;
392 
393 	/* try to using vram apreature to access vram first */
394 	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
395 	size -= count;
396 	if (size) {
397 		/* using MM to access rest vram */
398 		pos += count;
399 		buf += count;
400 		amdgpu_device_mm_access(adev, pos, buf, size, write);
401 	}
402 }
403 
404 /*
405  * register access helper functions.
406  */
407 
408 /* Check if hw access should be skipped because of hotplug or device error */
409 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
410 {
411 	if (adev->no_hw_access)
412 		return true;
413 
414 #ifdef CONFIG_LOCKDEP
415 	/*
416 	 * This is a bit complicated to understand, so worth a comment. What we assert
417 	 * here is that the GPU reset is not running on another thread in parallel.
418 	 *
419 	 * For this we trylock the read side of the reset semaphore, if that succeeds
420 	 * we know that the reset is not running in paralell.
421 	 *
422 	 * If the trylock fails we assert that we are either already holding the read
423 	 * side of the lock or are the reset thread itself and hold the write side of
424 	 * the lock.
425 	 */
426 	if (in_task()) {
427 		if (down_read_trylock(&adev->reset_domain->sem))
428 			up_read(&adev->reset_domain->sem);
429 		else
430 			lockdep_assert_held(&adev->reset_domain->sem);
431 	}
432 #endif
433 	return false;
434 }
435 
436 /**
437  * amdgpu_device_rreg - read a memory mapped IO or indirect register
438  *
439  * @adev: amdgpu_device pointer
440  * @reg: dword aligned register offset
441  * @acc_flags: access flags which require special behavior
442  *
443  * Returns the 32 bit value from the offset specified.
444  */
445 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
446 			    uint32_t reg, uint32_t acc_flags)
447 {
448 	uint32_t ret;
449 
450 	if (amdgpu_device_skip_hw_access(adev))
451 		return 0;
452 
453 	if ((reg * 4) < adev->rmmio_size) {
454 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
455 		    amdgpu_sriov_runtime(adev) &&
456 		    down_read_trylock(&adev->reset_domain->sem)) {
457 			ret = amdgpu_kiq_rreg(adev, reg);
458 			up_read(&adev->reset_domain->sem);
459 		} else {
460 			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
461 		}
462 	} else {
463 		ret = adev->pcie_rreg(adev, reg * 4);
464 	}
465 
466 	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
467 
468 	return ret;
469 }
470 
471 /*
472  * MMIO register read with bytes helper functions
473  * @offset:bytes offset from MMIO start
474  *
475 */
476 
477 /**
478  * amdgpu_mm_rreg8 - read a memory mapped IO register
479  *
480  * @adev: amdgpu_device pointer
481  * @offset: byte aligned register offset
482  *
483  * Returns the 8 bit value from the offset specified.
484  */
485 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
486 {
487 	if (amdgpu_device_skip_hw_access(adev))
488 		return 0;
489 
490 	if (offset < adev->rmmio_size)
491 		return (readb(adev->rmmio + offset));
492 	BUG();
493 }
494 
495 /*
496  * MMIO register write with bytes helper functions
497  * @offset:bytes offset from MMIO start
498  * @value: the value want to be written to the register
499  *
500 */
501 /**
502  * amdgpu_mm_wreg8 - read a memory mapped IO register
503  *
504  * @adev: amdgpu_device pointer
505  * @offset: byte aligned register offset
506  * @value: 8 bit value to write
507  *
508  * Writes the value specified to the offset specified.
509  */
510 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
511 {
512 	if (amdgpu_device_skip_hw_access(adev))
513 		return;
514 
515 	if (offset < adev->rmmio_size)
516 		writeb(value, adev->rmmio + offset);
517 	else
518 		BUG();
519 }
520 
521 /**
522  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
523  *
524  * @adev: amdgpu_device pointer
525  * @reg: dword aligned register offset
526  * @v: 32 bit value to write to the register
527  * @acc_flags: access flags which require special behavior
528  *
529  * Writes the value specified to the offset specified.
530  */
531 void amdgpu_device_wreg(struct amdgpu_device *adev,
532 			uint32_t reg, uint32_t v,
533 			uint32_t acc_flags)
534 {
535 	if (amdgpu_device_skip_hw_access(adev))
536 		return;
537 
538 	if ((reg * 4) < adev->rmmio_size) {
539 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
540 		    amdgpu_sriov_runtime(adev) &&
541 		    down_read_trylock(&adev->reset_domain->sem)) {
542 			amdgpu_kiq_wreg(adev, reg, v);
543 			up_read(&adev->reset_domain->sem);
544 		} else {
545 			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
546 		}
547 	} else {
548 		adev->pcie_wreg(adev, reg * 4, v);
549 	}
550 
551 	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
552 }
553 
554 /**
555  * amdgpu_mm_wreg_mmio_rlc -  write register either with direct/indirect mmio or with RLC path if in range
556  *
557  * @adev: amdgpu_device pointer
558  * @reg: mmio/rlc register
559  * @v: value to write
560  *
561  * this function is invoked only for the debugfs register access
562  */
563 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
564 			     uint32_t reg, uint32_t v)
565 {
566 	if (amdgpu_device_skip_hw_access(adev))
567 		return;
568 
569 	if (amdgpu_sriov_fullaccess(adev) &&
570 	    adev->gfx.rlc.funcs &&
571 	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
572 		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
573 			return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
574 	} else if ((reg * 4) >= adev->rmmio_size) {
575 		adev->pcie_wreg(adev, reg * 4, v);
576 	} else {
577 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
578 	}
579 }
580 
581 /**
582  * amdgpu_mm_rdoorbell - read a doorbell dword
583  *
584  * @adev: amdgpu_device pointer
585  * @index: doorbell index
586  *
587  * Returns the value in the doorbell aperture at the
588  * requested doorbell index (CIK).
589  */
590 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
591 {
592 	if (amdgpu_device_skip_hw_access(adev))
593 		return 0;
594 
595 	if (index < adev->doorbell.num_doorbells) {
596 		return readl(adev->doorbell.ptr + index);
597 	} else {
598 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
599 		return 0;
600 	}
601 }
602 
603 /**
604  * amdgpu_mm_wdoorbell - write a doorbell dword
605  *
606  * @adev: amdgpu_device pointer
607  * @index: doorbell index
608  * @v: value to write
609  *
610  * Writes @v to the doorbell aperture at the
611  * requested doorbell index (CIK).
612  */
613 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
614 {
615 	if (amdgpu_device_skip_hw_access(adev))
616 		return;
617 
618 	if (index < adev->doorbell.num_doorbells) {
619 		writel(v, adev->doorbell.ptr + index);
620 	} else {
621 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
622 	}
623 }
624 
625 /**
626  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
627  *
628  * @adev: amdgpu_device pointer
629  * @index: doorbell index
630  *
631  * Returns the value in the doorbell aperture at the
632  * requested doorbell index (VEGA10+).
633  */
634 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
635 {
636 	if (amdgpu_device_skip_hw_access(adev))
637 		return 0;
638 
639 	if (index < adev->doorbell.num_doorbells) {
640 		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
641 	} else {
642 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
643 		return 0;
644 	}
645 }
646 
647 /**
648  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
649  *
650  * @adev: amdgpu_device pointer
651  * @index: doorbell index
652  * @v: value to write
653  *
654  * Writes @v to the doorbell aperture at the
655  * requested doorbell index (VEGA10+).
656  */
657 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
658 {
659 	if (amdgpu_device_skip_hw_access(adev))
660 		return;
661 
662 	if (index < adev->doorbell.num_doorbells) {
663 		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
664 	} else {
665 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
666 	}
667 }
668 
669 /**
670  * amdgpu_device_indirect_rreg - read an indirect register
671  *
672  * @adev: amdgpu_device pointer
673  * @pcie_index: mmio register offset
674  * @pcie_data: mmio register offset
675  * @reg_addr: indirect register address to read from
676  *
677  * Returns the value of indirect register @reg_addr
678  */
679 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
680 				u32 pcie_index, u32 pcie_data,
681 				u32 reg_addr)
682 {
683 	unsigned long flags;
684 	u32 r;
685 	void __iomem *pcie_index_offset;
686 	void __iomem *pcie_data_offset;
687 
688 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
689 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
690 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
691 
692 	writel(reg_addr, pcie_index_offset);
693 	readl(pcie_index_offset);
694 	r = readl(pcie_data_offset);
695 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
696 
697 	return r;
698 }
699 
700 /**
701  * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
702  *
703  * @adev: amdgpu_device pointer
704  * @pcie_index: mmio register offset
705  * @pcie_data: mmio register offset
706  * @reg_addr: indirect register address to read from
707  *
708  * Returns the value of indirect register @reg_addr
709  */
710 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
711 				  u32 pcie_index, u32 pcie_data,
712 				  u32 reg_addr)
713 {
714 	unsigned long flags;
715 	u64 r;
716 	void __iomem *pcie_index_offset;
717 	void __iomem *pcie_data_offset;
718 
719 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
720 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
721 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
722 
723 	/* read low 32 bits */
724 	writel(reg_addr, pcie_index_offset);
725 	readl(pcie_index_offset);
726 	r = readl(pcie_data_offset);
727 	/* read high 32 bits */
728 	writel(reg_addr + 4, pcie_index_offset);
729 	readl(pcie_index_offset);
730 	r |= ((u64)readl(pcie_data_offset) << 32);
731 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
732 
733 	return r;
734 }
735 
736 /**
737  * amdgpu_device_indirect_wreg - write an indirect register address
738  *
739  * @adev: amdgpu_device pointer
740  * @pcie_index: mmio register offset
741  * @pcie_data: mmio register offset
742  * @reg_addr: indirect register offset
743  * @reg_data: indirect register data
744  *
745  */
746 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
747 				 u32 pcie_index, u32 pcie_data,
748 				 u32 reg_addr, u32 reg_data)
749 {
750 	unsigned long flags;
751 	void __iomem *pcie_index_offset;
752 	void __iomem *pcie_data_offset;
753 
754 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
755 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
756 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
757 
758 	writel(reg_addr, pcie_index_offset);
759 	readl(pcie_index_offset);
760 	writel(reg_data, pcie_data_offset);
761 	readl(pcie_data_offset);
762 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
763 }
764 
765 /**
766  * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
767  *
768  * @adev: amdgpu_device pointer
769  * @pcie_index: mmio register offset
770  * @pcie_data: mmio register offset
771  * @reg_addr: indirect register offset
772  * @reg_data: indirect register data
773  *
774  */
775 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
776 				   u32 pcie_index, u32 pcie_data,
777 				   u32 reg_addr, u64 reg_data)
778 {
779 	unsigned long flags;
780 	void __iomem *pcie_index_offset;
781 	void __iomem *pcie_data_offset;
782 
783 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
784 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
785 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
786 
787 	/* write low 32 bits */
788 	writel(reg_addr, pcie_index_offset);
789 	readl(pcie_index_offset);
790 	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
791 	readl(pcie_data_offset);
792 	/* write high 32 bits */
793 	writel(reg_addr + 4, pcie_index_offset);
794 	readl(pcie_index_offset);
795 	writel((u32)(reg_data >> 32), pcie_data_offset);
796 	readl(pcie_data_offset);
797 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
798 }
799 
800 /**
801  * amdgpu_invalid_rreg - dummy reg read function
802  *
803  * @adev: amdgpu_device pointer
804  * @reg: offset of register
805  *
806  * Dummy register read function.  Used for register blocks
807  * that certain asics don't have (all asics).
808  * Returns the value in the register.
809  */
810 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
811 {
812 	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
813 	BUG();
814 	return 0;
815 }
816 
817 /**
818  * amdgpu_invalid_wreg - dummy reg write function
819  *
820  * @adev: amdgpu_device pointer
821  * @reg: offset of register
822  * @v: value to write to the register
823  *
824  * Dummy register read function.  Used for register blocks
825  * that certain asics don't have (all asics).
826  */
827 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
828 {
829 	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
830 		  reg, v);
831 	BUG();
832 }
833 
834 /**
835  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
836  *
837  * @adev: amdgpu_device pointer
838  * @reg: offset of register
839  *
840  * Dummy register read function.  Used for register blocks
841  * that certain asics don't have (all asics).
842  * Returns the value in the register.
843  */
844 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
845 {
846 	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
847 	BUG();
848 	return 0;
849 }
850 
851 /**
852  * amdgpu_invalid_wreg64 - dummy reg write function
853  *
854  * @adev: amdgpu_device pointer
855  * @reg: offset of register
856  * @v: value to write to the register
857  *
858  * Dummy register read function.  Used for register blocks
859  * that certain asics don't have (all asics).
860  */
861 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
862 {
863 	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
864 		  reg, v);
865 	BUG();
866 }
867 
868 /**
869  * amdgpu_block_invalid_rreg - dummy reg read function
870  *
871  * @adev: amdgpu_device pointer
872  * @block: offset of instance
873  * @reg: offset of register
874  *
875  * Dummy register read function.  Used for register blocks
876  * that certain asics don't have (all asics).
877  * Returns the value in the register.
878  */
879 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
880 					  uint32_t block, uint32_t reg)
881 {
882 	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
883 		  reg, block);
884 	BUG();
885 	return 0;
886 }
887 
888 /**
889  * amdgpu_block_invalid_wreg - dummy reg write function
890  *
891  * @adev: amdgpu_device pointer
892  * @block: offset of instance
893  * @reg: offset of register
894  * @v: value to write to the register
895  *
896  * Dummy register read function.  Used for register blocks
897  * that certain asics don't have (all asics).
898  */
899 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
900 				      uint32_t block,
901 				      uint32_t reg, uint32_t v)
902 {
903 	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
904 		  reg, block, v);
905 	BUG();
906 }
907 
908 /**
909  * amdgpu_device_asic_init - Wrapper for atom asic_init
910  *
911  * @adev: amdgpu_device pointer
912  *
913  * Does any asic specific work and then calls atom asic init.
914  */
915 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
916 {
917 	amdgpu_asic_pre_asic_init(adev);
918 
919 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
920 		return amdgpu_atomfirmware_asic_init(adev, true);
921 	else
922 		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
923 }
924 
925 /**
926  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
927  *
928  * @adev: amdgpu_device pointer
929  *
930  * Allocates a scratch page of VRAM for use by various things in the
931  * driver.
932  */
933 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
934 {
935 	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
936 				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
937 				       &adev->vram_scratch.robj,
938 				       &adev->vram_scratch.gpu_addr,
939 				       (void **)&adev->vram_scratch.ptr);
940 }
941 
942 /**
943  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
944  *
945  * @adev: amdgpu_device pointer
946  *
947  * Frees the VRAM scratch page.
948  */
949 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
950 {
951 	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
952 }
953 
954 /**
955  * amdgpu_device_program_register_sequence - program an array of registers.
956  *
957  * @adev: amdgpu_device pointer
958  * @registers: pointer to the register array
959  * @array_size: size of the register array
960  *
961  * Programs an array or registers with and and or masks.
962  * This is a helper for setting golden registers.
963  */
964 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
965 					     const u32 *registers,
966 					     const u32 array_size)
967 {
968 	u32 tmp, reg, and_mask, or_mask;
969 	int i;
970 
971 	if (array_size % 3)
972 		return;
973 
974 	for (i = 0; i < array_size; i +=3) {
975 		reg = registers[i + 0];
976 		and_mask = registers[i + 1];
977 		or_mask = registers[i + 2];
978 
979 		if (and_mask == 0xffffffff) {
980 			tmp = or_mask;
981 		} else {
982 			tmp = RREG32(reg);
983 			tmp &= ~and_mask;
984 			if (adev->family >= AMDGPU_FAMILY_AI)
985 				tmp |= (or_mask & and_mask);
986 			else
987 				tmp |= or_mask;
988 		}
989 		WREG32(reg, tmp);
990 	}
991 }
992 
993 /**
994  * amdgpu_device_pci_config_reset - reset the GPU
995  *
996  * @adev: amdgpu_device pointer
997  *
998  * Resets the GPU using the pci config reset sequence.
999  * Only applicable to asics prior to vega10.
1000  */
1001 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1002 {
1003 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1004 }
1005 
1006 /**
1007  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1008  *
1009  * @adev: amdgpu_device pointer
1010  *
1011  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1012  */
1013 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1014 {
1015 	return pci_reset_function(adev->pdev);
1016 }
1017 
1018 /*
1019  * GPU doorbell aperture helpers function.
1020  */
1021 /**
1022  * amdgpu_device_doorbell_init - Init doorbell driver information.
1023  *
1024  * @adev: amdgpu_device pointer
1025  *
1026  * Init doorbell driver information (CIK)
1027  * Returns 0 on success, error on failure.
1028  */
1029 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1030 {
1031 
1032 	/* No doorbell on SI hardware generation */
1033 	if (adev->asic_type < CHIP_BONAIRE) {
1034 		adev->doorbell.base = 0;
1035 		adev->doorbell.size = 0;
1036 		adev->doorbell.num_doorbells = 0;
1037 		adev->doorbell.ptr = NULL;
1038 		return 0;
1039 	}
1040 
1041 	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1042 		return -EINVAL;
1043 
1044 	amdgpu_asic_init_doorbell_index(adev);
1045 
1046 	/* doorbell bar mapping */
1047 	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1048 	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1049 
1050 	if (adev->enable_mes) {
1051 		adev->doorbell.num_doorbells =
1052 			adev->doorbell.size / sizeof(u32);
1053 	} else {
1054 		adev->doorbell.num_doorbells =
1055 			min_t(u32, adev->doorbell.size / sizeof(u32),
1056 			      adev->doorbell_index.max_assignment+1);
1057 		if (adev->doorbell.num_doorbells == 0)
1058 			return -EINVAL;
1059 
1060 		/* For Vega, reserve and map two pages on doorbell BAR since SDMA
1061 		 * paging queue doorbell use the second page. The
1062 		 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1063 		 * doorbells are in the first page. So with paging queue enabled,
1064 		 * the max num_doorbells should + 1 page (0x400 in dword)
1065 		 */
1066 		if (adev->asic_type >= CHIP_VEGA10)
1067 			adev->doorbell.num_doorbells += 0x400;
1068 	}
1069 
1070 	adev->doorbell.ptr = ioremap(adev->doorbell.base,
1071 				     adev->doorbell.num_doorbells *
1072 				     sizeof(u32));
1073 	if (adev->doorbell.ptr == NULL)
1074 		return -ENOMEM;
1075 
1076 	return 0;
1077 }
1078 
1079 /**
1080  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1081  *
1082  * @adev: amdgpu_device pointer
1083  *
1084  * Tear down doorbell driver information (CIK)
1085  */
1086 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1087 {
1088 	iounmap(adev->doorbell.ptr);
1089 	adev->doorbell.ptr = NULL;
1090 }
1091 
1092 
1093 
1094 /*
1095  * amdgpu_device_wb_*()
1096  * Writeback is the method by which the GPU updates special pages in memory
1097  * with the status of certain GPU events (fences, ring pointers,etc.).
1098  */
1099 
1100 /**
1101  * amdgpu_device_wb_fini - Disable Writeback and free memory
1102  *
1103  * @adev: amdgpu_device pointer
1104  *
1105  * Disables Writeback and frees the Writeback memory (all asics).
1106  * Used at driver shutdown.
1107  */
1108 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1109 {
1110 	if (adev->wb.wb_obj) {
1111 		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1112 				      &adev->wb.gpu_addr,
1113 				      (void **)&adev->wb.wb);
1114 		adev->wb.wb_obj = NULL;
1115 	}
1116 }
1117 
1118 /**
1119  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1120  *
1121  * @adev: amdgpu_device pointer
1122  *
1123  * Initializes writeback and allocates writeback memory (all asics).
1124  * Used at driver startup.
1125  * Returns 0 on success or an -error on failure.
1126  */
1127 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1128 {
1129 	int r;
1130 
1131 	if (adev->wb.wb_obj == NULL) {
1132 		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1133 		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1134 					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1135 					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
1136 					    (void **)&adev->wb.wb);
1137 		if (r) {
1138 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1139 			return r;
1140 		}
1141 
1142 		adev->wb.num_wb = AMDGPU_MAX_WB;
1143 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1144 
1145 		/* clear wb memory */
1146 		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1147 	}
1148 
1149 	return 0;
1150 }
1151 
1152 /**
1153  * amdgpu_device_wb_get - Allocate a wb entry
1154  *
1155  * @adev: amdgpu_device pointer
1156  * @wb: wb index
1157  *
1158  * Allocate a wb slot for use by the driver (all asics).
1159  * Returns 0 on success or -EINVAL on failure.
1160  */
1161 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1162 {
1163 	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1164 
1165 	if (offset < adev->wb.num_wb) {
1166 		__set_bit(offset, adev->wb.used);
1167 		*wb = offset << 3; /* convert to dw offset */
1168 		return 0;
1169 	} else {
1170 		return -EINVAL;
1171 	}
1172 }
1173 
1174 /**
1175  * amdgpu_device_wb_free - Free a wb entry
1176  *
1177  * @adev: amdgpu_device pointer
1178  * @wb: wb index
1179  *
1180  * Free a wb slot allocated for use by the driver (all asics)
1181  */
1182 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1183 {
1184 	wb >>= 3;
1185 	if (wb < adev->wb.num_wb)
1186 		__clear_bit(wb, adev->wb.used);
1187 }
1188 
1189 /**
1190  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1191  *
1192  * @adev: amdgpu_device pointer
1193  *
1194  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1195  * to fail, but if any of the BARs is not accessible after the size we abort
1196  * driver loading by returning -ENODEV.
1197  */
1198 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1199 {
1200 	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1201 	struct pci_bus *root;
1202 	struct resource *res;
1203 	unsigned i;
1204 	u16 cmd;
1205 	int r;
1206 
1207 	/* Bypass for VF */
1208 	if (amdgpu_sriov_vf(adev))
1209 		return 0;
1210 
1211 	/* skip if the bios has already enabled large BAR */
1212 	if (adev->gmc.real_vram_size &&
1213 	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1214 		return 0;
1215 
1216 	/* Check if the root BUS has 64bit memory resources */
1217 	root = adev->pdev->bus;
1218 	while (root->parent)
1219 		root = root->parent;
1220 
1221 	pci_bus_for_each_resource(root, res, i) {
1222 		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1223 		    res->start > 0x100000000ull)
1224 			break;
1225 	}
1226 
1227 	/* Trying to resize is pointless without a root hub window above 4GB */
1228 	if (!res)
1229 		return 0;
1230 
1231 	/* Limit the BAR size to what is available */
1232 	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1233 			rbar_size);
1234 
1235 	/* Disable memory decoding while we change the BAR addresses and size */
1236 	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1237 	pci_write_config_word(adev->pdev, PCI_COMMAND,
1238 			      cmd & ~PCI_COMMAND_MEMORY);
1239 
1240 	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
1241 	amdgpu_device_doorbell_fini(adev);
1242 	if (adev->asic_type >= CHIP_BONAIRE)
1243 		pci_release_resource(adev->pdev, 2);
1244 
1245 	pci_release_resource(adev->pdev, 0);
1246 
1247 	r = pci_resize_resource(adev->pdev, 0, rbar_size);
1248 	if (r == -ENOSPC)
1249 		DRM_INFO("Not enough PCI address space for a large BAR.");
1250 	else if (r && r != -ENOTSUPP)
1251 		DRM_ERROR("Problem resizing BAR0 (%d).", r);
1252 
1253 	pci_assign_unassigned_bus_resources(adev->pdev->bus);
1254 
1255 	/* When the doorbell or fb BAR isn't available we have no chance of
1256 	 * using the device.
1257 	 */
1258 	r = amdgpu_device_doorbell_init(adev);
1259 	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1260 		return -ENODEV;
1261 
1262 	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1263 
1264 	return 0;
1265 }
1266 
1267 /*
1268  * GPU helpers function.
1269  */
1270 /**
1271  * amdgpu_device_need_post - check if the hw need post or not
1272  *
1273  * @adev: amdgpu_device pointer
1274  *
1275  * Check if the asic has been initialized (all asics) at driver startup
1276  * or post is needed if  hw reset is performed.
1277  * Returns true if need or false if not.
1278  */
1279 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1280 {
1281 	uint32_t reg;
1282 
1283 	if (amdgpu_sriov_vf(adev))
1284 		return false;
1285 
1286 	if (amdgpu_passthrough(adev)) {
1287 		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
1288 		 * some old smc fw still need driver do vPost otherwise gpu hang, while
1289 		 * those smc fw version above 22.15 doesn't have this flaw, so we force
1290 		 * vpost executed for smc version below 22.15
1291 		 */
1292 		if (adev->asic_type == CHIP_FIJI) {
1293 			int err;
1294 			uint32_t fw_ver;
1295 			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1296 			/* force vPost if error occured */
1297 			if (err)
1298 				return true;
1299 
1300 			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1301 			if (fw_ver < 0x00160e00)
1302 				return true;
1303 		}
1304 	}
1305 
1306 	/* Don't post if we need to reset whole hive on init */
1307 	if (adev->gmc.xgmi.pending_reset)
1308 		return false;
1309 
1310 	if (adev->has_hw_reset) {
1311 		adev->has_hw_reset = false;
1312 		return true;
1313 	}
1314 
1315 	/* bios scratch used on CIK+ */
1316 	if (adev->asic_type >= CHIP_BONAIRE)
1317 		return amdgpu_atombios_scratch_need_asic_init(adev);
1318 
1319 	/* check MEM_SIZE for older asics */
1320 	reg = amdgpu_asic_get_config_memsize(adev);
1321 
1322 	if ((reg != 0) && (reg != 0xffffffff))
1323 		return false;
1324 
1325 	return true;
1326 }
1327 
1328 /**
1329  * amdgpu_device_should_use_aspm - check if the device should program ASPM
1330  *
1331  * @adev: amdgpu_device pointer
1332  *
1333  * Confirm whether the module parameter and pcie bridge agree that ASPM should
1334  * be set for this device.
1335  *
1336  * Returns true if it should be used or false if not.
1337  */
1338 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1339 {
1340 	switch (amdgpu_aspm) {
1341 	case -1:
1342 		break;
1343 	case 0:
1344 		return false;
1345 	case 1:
1346 		return true;
1347 	default:
1348 		return false;
1349 	}
1350 	return pcie_aspm_enabled(adev->pdev);
1351 }
1352 
1353 /* if we get transitioned to only one device, take VGA back */
1354 /**
1355  * amdgpu_device_vga_set_decode - enable/disable vga decode
1356  *
1357  * @pdev: PCI device pointer
1358  * @state: enable/disable vga decode
1359  *
1360  * Enable/disable vga decode (all asics).
1361  * Returns VGA resource flags.
1362  */
1363 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1364 		bool state)
1365 {
1366 	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1367 	amdgpu_asic_set_vga_state(adev, state);
1368 	if (state)
1369 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1370 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1371 	else
1372 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1373 }
1374 
1375 /**
1376  * amdgpu_device_check_block_size - validate the vm block size
1377  *
1378  * @adev: amdgpu_device pointer
1379  *
1380  * Validates the vm block size specified via module parameter.
1381  * The vm block size defines number of bits in page table versus page directory,
1382  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1383  * page table and the remaining bits are in the page directory.
1384  */
1385 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1386 {
1387 	/* defines number of bits in page table versus page directory,
1388 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1389 	 * page table and the remaining bits are in the page directory */
1390 	if (amdgpu_vm_block_size == -1)
1391 		return;
1392 
1393 	if (amdgpu_vm_block_size < 9) {
1394 		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1395 			 amdgpu_vm_block_size);
1396 		amdgpu_vm_block_size = -1;
1397 	}
1398 }
1399 
1400 /**
1401  * amdgpu_device_check_vm_size - validate the vm size
1402  *
1403  * @adev: amdgpu_device pointer
1404  *
1405  * Validates the vm size in GB specified via module parameter.
1406  * The VM size is the size of the GPU virtual memory space in GB.
1407  */
1408 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1409 {
1410 	/* no need to check the default value */
1411 	if (amdgpu_vm_size == -1)
1412 		return;
1413 
1414 	if (amdgpu_vm_size < 1) {
1415 		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1416 			 amdgpu_vm_size);
1417 		amdgpu_vm_size = -1;
1418 	}
1419 }
1420 
1421 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1422 {
1423 	struct sysinfo si;
1424 	bool is_os_64 = (sizeof(void *) == 8);
1425 	uint64_t total_memory;
1426 	uint64_t dram_size_seven_GB = 0x1B8000000;
1427 	uint64_t dram_size_three_GB = 0xB8000000;
1428 
1429 	if (amdgpu_smu_memory_pool_size == 0)
1430 		return;
1431 
1432 	if (!is_os_64) {
1433 		DRM_WARN("Not 64-bit OS, feature not supported\n");
1434 		goto def_value;
1435 	}
1436 	si_meminfo(&si);
1437 	total_memory = (uint64_t)si.totalram * si.mem_unit;
1438 
1439 	if ((amdgpu_smu_memory_pool_size == 1) ||
1440 		(amdgpu_smu_memory_pool_size == 2)) {
1441 		if (total_memory < dram_size_three_GB)
1442 			goto def_value1;
1443 	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1444 		(amdgpu_smu_memory_pool_size == 8)) {
1445 		if (total_memory < dram_size_seven_GB)
1446 			goto def_value1;
1447 	} else {
1448 		DRM_WARN("Smu memory pool size not supported\n");
1449 		goto def_value;
1450 	}
1451 	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1452 
1453 	return;
1454 
1455 def_value1:
1456 	DRM_WARN("No enough system memory\n");
1457 def_value:
1458 	adev->pm.smu_prv_buffer_size = 0;
1459 }
1460 
1461 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1462 {
1463 	if (!(adev->flags & AMD_IS_APU) ||
1464 	    adev->asic_type < CHIP_RAVEN)
1465 		return 0;
1466 
1467 	switch (adev->asic_type) {
1468 	case CHIP_RAVEN:
1469 		if (adev->pdev->device == 0x15dd)
1470 			adev->apu_flags |= AMD_APU_IS_RAVEN;
1471 		if (adev->pdev->device == 0x15d8)
1472 			adev->apu_flags |= AMD_APU_IS_PICASSO;
1473 		break;
1474 	case CHIP_RENOIR:
1475 		if ((adev->pdev->device == 0x1636) ||
1476 		    (adev->pdev->device == 0x164c))
1477 			adev->apu_flags |= AMD_APU_IS_RENOIR;
1478 		else
1479 			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1480 		break;
1481 	case CHIP_VANGOGH:
1482 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
1483 		break;
1484 	case CHIP_YELLOW_CARP:
1485 		break;
1486 	case CHIP_CYAN_SKILLFISH:
1487 		if ((adev->pdev->device == 0x13FE) ||
1488 		    (adev->pdev->device == 0x143F))
1489 			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1490 		break;
1491 	default:
1492 		break;
1493 	}
1494 
1495 	return 0;
1496 }
1497 
1498 /**
1499  * amdgpu_device_check_arguments - validate module params
1500  *
1501  * @adev: amdgpu_device pointer
1502  *
1503  * Validates certain module parameters and updates
1504  * the associated values used by the driver (all asics).
1505  */
1506 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1507 {
1508 	if (amdgpu_sched_jobs < 4) {
1509 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1510 			 amdgpu_sched_jobs);
1511 		amdgpu_sched_jobs = 4;
1512 	} else if (!is_power_of_2(amdgpu_sched_jobs)){
1513 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1514 			 amdgpu_sched_jobs);
1515 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1516 	}
1517 
1518 	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1519 		/* gart size must be greater or equal to 32M */
1520 		dev_warn(adev->dev, "gart size (%d) too small\n",
1521 			 amdgpu_gart_size);
1522 		amdgpu_gart_size = -1;
1523 	}
1524 
1525 	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1526 		/* gtt size must be greater or equal to 32M */
1527 		dev_warn(adev->dev, "gtt size (%d) too small\n",
1528 				 amdgpu_gtt_size);
1529 		amdgpu_gtt_size = -1;
1530 	}
1531 
1532 	/* valid range is between 4 and 9 inclusive */
1533 	if (amdgpu_vm_fragment_size != -1 &&
1534 	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1535 		dev_warn(adev->dev, "valid range is between 4 and 9\n");
1536 		amdgpu_vm_fragment_size = -1;
1537 	}
1538 
1539 	if (amdgpu_sched_hw_submission < 2) {
1540 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1541 			 amdgpu_sched_hw_submission);
1542 		amdgpu_sched_hw_submission = 2;
1543 	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1544 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1545 			 amdgpu_sched_hw_submission);
1546 		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1547 	}
1548 
1549 	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1550 		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1551 		amdgpu_reset_method = -1;
1552 	}
1553 
1554 	amdgpu_device_check_smu_prv_buffer_size(adev);
1555 
1556 	amdgpu_device_check_vm_size(adev);
1557 
1558 	amdgpu_device_check_block_size(adev);
1559 
1560 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1561 
1562 	return 0;
1563 }
1564 
1565 /**
1566  * amdgpu_switcheroo_set_state - set switcheroo state
1567  *
1568  * @pdev: pci dev pointer
1569  * @state: vga_switcheroo state
1570  *
1571  * Callback for the switcheroo driver.  Suspends or resumes
1572  * the asics before or after it is powered up using ACPI methods.
1573  */
1574 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1575 					enum vga_switcheroo_state state)
1576 {
1577 	struct drm_device *dev = pci_get_drvdata(pdev);
1578 	int r;
1579 
1580 	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1581 		return;
1582 
1583 	if (state == VGA_SWITCHEROO_ON) {
1584 		pr_info("switched on\n");
1585 		/* don't suspend or resume card normally */
1586 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1587 
1588 		pci_set_power_state(pdev, PCI_D0);
1589 		amdgpu_device_load_pci_state(pdev);
1590 		r = pci_enable_device(pdev);
1591 		if (r)
1592 			DRM_WARN("pci_enable_device failed (%d)\n", r);
1593 		amdgpu_device_resume(dev, true);
1594 
1595 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1596 	} else {
1597 		pr_info("switched off\n");
1598 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1599 		amdgpu_device_suspend(dev, true);
1600 		amdgpu_device_cache_pci_state(pdev);
1601 		/* Shut down the device */
1602 		pci_disable_device(pdev);
1603 		pci_set_power_state(pdev, PCI_D3cold);
1604 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1605 	}
1606 }
1607 
1608 /**
1609  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1610  *
1611  * @pdev: pci dev pointer
1612  *
1613  * Callback for the switcheroo driver.  Check of the switcheroo
1614  * state can be changed.
1615  * Returns true if the state can be changed, false if not.
1616  */
1617 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1618 {
1619 	struct drm_device *dev = pci_get_drvdata(pdev);
1620 
1621 	/*
1622 	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1623 	* locking inversion with the driver load path. And the access here is
1624 	* completely racy anyway. So don't bother with locking for now.
1625 	*/
1626 	return atomic_read(&dev->open_count) == 0;
1627 }
1628 
1629 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1630 	.set_gpu_state = amdgpu_switcheroo_set_state,
1631 	.reprobe = NULL,
1632 	.can_switch = amdgpu_switcheroo_can_switch,
1633 };
1634 
1635 /**
1636  * amdgpu_device_ip_set_clockgating_state - set the CG state
1637  *
1638  * @dev: amdgpu_device pointer
1639  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1640  * @state: clockgating state (gate or ungate)
1641  *
1642  * Sets the requested clockgating state for all instances of
1643  * the hardware IP specified.
1644  * Returns the error code from the last instance.
1645  */
1646 int amdgpu_device_ip_set_clockgating_state(void *dev,
1647 					   enum amd_ip_block_type block_type,
1648 					   enum amd_clockgating_state state)
1649 {
1650 	struct amdgpu_device *adev = dev;
1651 	int i, r = 0;
1652 
1653 	for (i = 0; i < adev->num_ip_blocks; i++) {
1654 		if (!adev->ip_blocks[i].status.valid)
1655 			continue;
1656 		if (adev->ip_blocks[i].version->type != block_type)
1657 			continue;
1658 		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1659 			continue;
1660 		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1661 			(void *)adev, state);
1662 		if (r)
1663 			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1664 				  adev->ip_blocks[i].version->funcs->name, r);
1665 	}
1666 	return r;
1667 }
1668 
1669 /**
1670  * amdgpu_device_ip_set_powergating_state - set the PG state
1671  *
1672  * @dev: amdgpu_device pointer
1673  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1674  * @state: powergating state (gate or ungate)
1675  *
1676  * Sets the requested powergating state for all instances of
1677  * the hardware IP specified.
1678  * Returns the error code from the last instance.
1679  */
1680 int amdgpu_device_ip_set_powergating_state(void *dev,
1681 					   enum amd_ip_block_type block_type,
1682 					   enum amd_powergating_state state)
1683 {
1684 	struct amdgpu_device *adev = dev;
1685 	int i, r = 0;
1686 
1687 	for (i = 0; i < adev->num_ip_blocks; i++) {
1688 		if (!adev->ip_blocks[i].status.valid)
1689 			continue;
1690 		if (adev->ip_blocks[i].version->type != block_type)
1691 			continue;
1692 		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1693 			continue;
1694 		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1695 			(void *)adev, state);
1696 		if (r)
1697 			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1698 				  adev->ip_blocks[i].version->funcs->name, r);
1699 	}
1700 	return r;
1701 }
1702 
1703 /**
1704  * amdgpu_device_ip_get_clockgating_state - get the CG state
1705  *
1706  * @adev: amdgpu_device pointer
1707  * @flags: clockgating feature flags
1708  *
1709  * Walks the list of IPs on the device and updates the clockgating
1710  * flags for each IP.
1711  * Updates @flags with the feature flags for each hardware IP where
1712  * clockgating is enabled.
1713  */
1714 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1715 					    u64 *flags)
1716 {
1717 	int i;
1718 
1719 	for (i = 0; i < adev->num_ip_blocks; i++) {
1720 		if (!adev->ip_blocks[i].status.valid)
1721 			continue;
1722 		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1723 			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1724 	}
1725 }
1726 
1727 /**
1728  * amdgpu_device_ip_wait_for_idle - wait for idle
1729  *
1730  * @adev: amdgpu_device pointer
1731  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1732  *
1733  * Waits for the request hardware IP to be idle.
1734  * Returns 0 for success or a negative error code on failure.
1735  */
1736 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1737 				   enum amd_ip_block_type block_type)
1738 {
1739 	int i, r;
1740 
1741 	for (i = 0; i < adev->num_ip_blocks; i++) {
1742 		if (!adev->ip_blocks[i].status.valid)
1743 			continue;
1744 		if (adev->ip_blocks[i].version->type == block_type) {
1745 			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1746 			if (r)
1747 				return r;
1748 			break;
1749 		}
1750 	}
1751 	return 0;
1752 
1753 }
1754 
1755 /**
1756  * amdgpu_device_ip_is_idle - is the hardware IP idle
1757  *
1758  * @adev: amdgpu_device pointer
1759  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1760  *
1761  * Check if the hardware IP is idle or not.
1762  * Returns true if it the IP is idle, false if not.
1763  */
1764 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1765 			      enum amd_ip_block_type block_type)
1766 {
1767 	int i;
1768 
1769 	for (i = 0; i < adev->num_ip_blocks; i++) {
1770 		if (!adev->ip_blocks[i].status.valid)
1771 			continue;
1772 		if (adev->ip_blocks[i].version->type == block_type)
1773 			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1774 	}
1775 	return true;
1776 
1777 }
1778 
1779 /**
1780  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1781  *
1782  * @adev: amdgpu_device pointer
1783  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1784  *
1785  * Returns a pointer to the hardware IP block structure
1786  * if it exists for the asic, otherwise NULL.
1787  */
1788 struct amdgpu_ip_block *
1789 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1790 			      enum amd_ip_block_type type)
1791 {
1792 	int i;
1793 
1794 	for (i = 0; i < adev->num_ip_blocks; i++)
1795 		if (adev->ip_blocks[i].version->type == type)
1796 			return &adev->ip_blocks[i];
1797 
1798 	return NULL;
1799 }
1800 
1801 /**
1802  * amdgpu_device_ip_block_version_cmp
1803  *
1804  * @adev: amdgpu_device pointer
1805  * @type: enum amd_ip_block_type
1806  * @major: major version
1807  * @minor: minor version
1808  *
1809  * return 0 if equal or greater
1810  * return 1 if smaller or the ip_block doesn't exist
1811  */
1812 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1813 				       enum amd_ip_block_type type,
1814 				       u32 major, u32 minor)
1815 {
1816 	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1817 
1818 	if (ip_block && ((ip_block->version->major > major) ||
1819 			((ip_block->version->major == major) &&
1820 			(ip_block->version->minor >= minor))))
1821 		return 0;
1822 
1823 	return 1;
1824 }
1825 
1826 /**
1827  * amdgpu_device_ip_block_add
1828  *
1829  * @adev: amdgpu_device pointer
1830  * @ip_block_version: pointer to the IP to add
1831  *
1832  * Adds the IP block driver information to the collection of IPs
1833  * on the asic.
1834  */
1835 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1836 			       const struct amdgpu_ip_block_version *ip_block_version)
1837 {
1838 	if (!ip_block_version)
1839 		return -EINVAL;
1840 
1841 	switch (ip_block_version->type) {
1842 	case AMD_IP_BLOCK_TYPE_VCN:
1843 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1844 			return 0;
1845 		break;
1846 	case AMD_IP_BLOCK_TYPE_JPEG:
1847 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1848 			return 0;
1849 		break;
1850 	default:
1851 		break;
1852 	}
1853 
1854 	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1855 		  ip_block_version->funcs->name);
1856 
1857 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1858 
1859 	return 0;
1860 }
1861 
1862 /**
1863  * amdgpu_device_enable_virtual_display - enable virtual display feature
1864  *
1865  * @adev: amdgpu_device pointer
1866  *
1867  * Enabled the virtual display feature if the user has enabled it via
1868  * the module parameter virtual_display.  This feature provides a virtual
1869  * display hardware on headless boards or in virtualized environments.
1870  * This function parses and validates the configuration string specified by
1871  * the user and configues the virtual display configuration (number of
1872  * virtual connectors, crtcs, etc.) specified.
1873  */
1874 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1875 {
1876 	adev->enable_virtual_display = false;
1877 
1878 	if (amdgpu_virtual_display) {
1879 		const char *pci_address_name = pci_name(adev->pdev);
1880 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1881 
1882 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1883 		pciaddstr_tmp = pciaddstr;
1884 		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1885 			pciaddname = strsep(&pciaddname_tmp, ",");
1886 			if (!strcmp("all", pciaddname)
1887 			    || !strcmp(pci_address_name, pciaddname)) {
1888 				long num_crtc;
1889 				int res = -1;
1890 
1891 				adev->enable_virtual_display = true;
1892 
1893 				if (pciaddname_tmp)
1894 					res = kstrtol(pciaddname_tmp, 10,
1895 						      &num_crtc);
1896 
1897 				if (!res) {
1898 					if (num_crtc < 1)
1899 						num_crtc = 1;
1900 					if (num_crtc > 6)
1901 						num_crtc = 6;
1902 					adev->mode_info.num_crtc = num_crtc;
1903 				} else {
1904 					adev->mode_info.num_crtc = 1;
1905 				}
1906 				break;
1907 			}
1908 		}
1909 
1910 		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1911 			 amdgpu_virtual_display, pci_address_name,
1912 			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1913 
1914 		kfree(pciaddstr);
1915 	}
1916 }
1917 
1918 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
1919 {
1920 	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
1921 		adev->mode_info.num_crtc = 1;
1922 		adev->enable_virtual_display = true;
1923 		DRM_INFO("virtual_display:%d, num_crtc:%d\n",
1924 			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1925 	}
1926 }
1927 
1928 /**
1929  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1930  *
1931  * @adev: amdgpu_device pointer
1932  *
1933  * Parses the asic configuration parameters specified in the gpu info
1934  * firmware and makes them availale to the driver for use in configuring
1935  * the asic.
1936  * Returns 0 on success, -EINVAL on failure.
1937  */
1938 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1939 {
1940 	const char *chip_name;
1941 	char fw_name[40];
1942 	int err;
1943 	const struct gpu_info_firmware_header_v1_0 *hdr;
1944 
1945 	adev->firmware.gpu_info_fw = NULL;
1946 
1947 	if (adev->mman.discovery_bin) {
1948 		/*
1949 		 * FIXME: The bounding box is still needed by Navi12, so
1950 		 * temporarily read it from gpu_info firmware. Should be dropped
1951 		 * when DAL no longer needs it.
1952 		 */
1953 		if (adev->asic_type != CHIP_NAVI12)
1954 			return 0;
1955 	}
1956 
1957 	switch (adev->asic_type) {
1958 	default:
1959 		return 0;
1960 	case CHIP_VEGA10:
1961 		chip_name = "vega10";
1962 		break;
1963 	case CHIP_VEGA12:
1964 		chip_name = "vega12";
1965 		break;
1966 	case CHIP_RAVEN:
1967 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1968 			chip_name = "raven2";
1969 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1970 			chip_name = "picasso";
1971 		else
1972 			chip_name = "raven";
1973 		break;
1974 	case CHIP_ARCTURUS:
1975 		chip_name = "arcturus";
1976 		break;
1977 	case CHIP_NAVI12:
1978 		chip_name = "navi12";
1979 		break;
1980 	}
1981 
1982 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1983 	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1984 	if (err) {
1985 		dev_err(adev->dev,
1986 			"Failed to load gpu_info firmware \"%s\"\n",
1987 			fw_name);
1988 		goto out;
1989 	}
1990 	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1991 	if (err) {
1992 		dev_err(adev->dev,
1993 			"Failed to validate gpu_info firmware \"%s\"\n",
1994 			fw_name);
1995 		goto out;
1996 	}
1997 
1998 	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1999 	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2000 
2001 	switch (hdr->version_major) {
2002 	case 1:
2003 	{
2004 		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2005 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2006 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2007 
2008 		/*
2009 		 * Should be droped when DAL no longer needs it.
2010 		 */
2011 		if (adev->asic_type == CHIP_NAVI12)
2012 			goto parse_soc_bounding_box;
2013 
2014 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2015 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2016 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2017 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2018 		adev->gfx.config.max_texture_channel_caches =
2019 			le32_to_cpu(gpu_info_fw->gc_num_tccs);
2020 		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2021 		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2022 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2023 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2024 		adev->gfx.config.double_offchip_lds_buf =
2025 			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2026 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2027 		adev->gfx.cu_info.max_waves_per_simd =
2028 			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2029 		adev->gfx.cu_info.max_scratch_slots_per_cu =
2030 			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2031 		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2032 		if (hdr->version_minor >= 1) {
2033 			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2034 				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2035 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2036 			adev->gfx.config.num_sc_per_sh =
2037 				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2038 			adev->gfx.config.num_packer_per_sc =
2039 				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2040 		}
2041 
2042 parse_soc_bounding_box:
2043 		/*
2044 		 * soc bounding box info is not integrated in disocovery table,
2045 		 * we always need to parse it from gpu info firmware if needed.
2046 		 */
2047 		if (hdr->version_minor == 2) {
2048 			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2049 				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2050 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2051 			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2052 		}
2053 		break;
2054 	}
2055 	default:
2056 		dev_err(adev->dev,
2057 			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2058 		err = -EINVAL;
2059 		goto out;
2060 	}
2061 out:
2062 	return err;
2063 }
2064 
2065 /**
2066  * amdgpu_device_ip_early_init - run early init for hardware IPs
2067  *
2068  * @adev: amdgpu_device pointer
2069  *
2070  * Early initialization pass for hardware IPs.  The hardware IPs that make
2071  * up each asic are discovered each IP's early_init callback is run.  This
2072  * is the first stage in initializing the asic.
2073  * Returns 0 on success, negative error code on failure.
2074  */
2075 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2076 {
2077 	struct drm_device *dev = adev_to_drm(adev);
2078 	struct pci_dev *parent;
2079 	int i, r;
2080 
2081 	amdgpu_device_enable_virtual_display(adev);
2082 
2083 	if (amdgpu_sriov_vf(adev)) {
2084 		r = amdgpu_virt_request_full_gpu(adev, true);
2085 		if (r)
2086 			return r;
2087 	}
2088 
2089 	switch (adev->asic_type) {
2090 #ifdef CONFIG_DRM_AMDGPU_SI
2091 	case CHIP_VERDE:
2092 	case CHIP_TAHITI:
2093 	case CHIP_PITCAIRN:
2094 	case CHIP_OLAND:
2095 	case CHIP_HAINAN:
2096 		adev->family = AMDGPU_FAMILY_SI;
2097 		r = si_set_ip_blocks(adev);
2098 		if (r)
2099 			return r;
2100 		break;
2101 #endif
2102 #ifdef CONFIG_DRM_AMDGPU_CIK
2103 	case CHIP_BONAIRE:
2104 	case CHIP_HAWAII:
2105 	case CHIP_KAVERI:
2106 	case CHIP_KABINI:
2107 	case CHIP_MULLINS:
2108 		if (adev->flags & AMD_IS_APU)
2109 			adev->family = AMDGPU_FAMILY_KV;
2110 		else
2111 			adev->family = AMDGPU_FAMILY_CI;
2112 
2113 		r = cik_set_ip_blocks(adev);
2114 		if (r)
2115 			return r;
2116 		break;
2117 #endif
2118 	case CHIP_TOPAZ:
2119 	case CHIP_TONGA:
2120 	case CHIP_FIJI:
2121 	case CHIP_POLARIS10:
2122 	case CHIP_POLARIS11:
2123 	case CHIP_POLARIS12:
2124 	case CHIP_VEGAM:
2125 	case CHIP_CARRIZO:
2126 	case CHIP_STONEY:
2127 		if (adev->flags & AMD_IS_APU)
2128 			adev->family = AMDGPU_FAMILY_CZ;
2129 		else
2130 			adev->family = AMDGPU_FAMILY_VI;
2131 
2132 		r = vi_set_ip_blocks(adev);
2133 		if (r)
2134 			return r;
2135 		break;
2136 	default:
2137 		r = amdgpu_discovery_set_ip_blocks(adev);
2138 		if (r)
2139 			return r;
2140 		break;
2141 	}
2142 
2143 	if (amdgpu_has_atpx() &&
2144 	    (amdgpu_is_atpx_hybrid() ||
2145 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
2146 	    ((adev->flags & AMD_IS_APU) == 0) &&
2147 	    !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2148 		adev->flags |= AMD_IS_PX;
2149 
2150 	if (!(adev->flags & AMD_IS_APU)) {
2151 		parent = pci_upstream_bridge(adev->pdev);
2152 		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2153 	}
2154 
2155 	amdgpu_amdkfd_device_probe(adev);
2156 
2157 	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2158 	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2159 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2160 	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2161 		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2162 
2163 	for (i = 0; i < adev->num_ip_blocks; i++) {
2164 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2165 			DRM_ERROR("disabled ip block: %d <%s>\n",
2166 				  i, adev->ip_blocks[i].version->funcs->name);
2167 			adev->ip_blocks[i].status.valid = false;
2168 		} else {
2169 			if (adev->ip_blocks[i].version->funcs->early_init) {
2170 				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2171 				if (r == -ENOENT) {
2172 					adev->ip_blocks[i].status.valid = false;
2173 				} else if (r) {
2174 					DRM_ERROR("early_init of IP block <%s> failed %d\n",
2175 						  adev->ip_blocks[i].version->funcs->name, r);
2176 					return r;
2177 				} else {
2178 					adev->ip_blocks[i].status.valid = true;
2179 				}
2180 			} else {
2181 				adev->ip_blocks[i].status.valid = true;
2182 			}
2183 		}
2184 		/* get the vbios after the asic_funcs are set up */
2185 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2186 			r = amdgpu_device_parse_gpu_info_fw(adev);
2187 			if (r)
2188 				return r;
2189 
2190 			/* Read BIOS */
2191 			if (!amdgpu_get_bios(adev))
2192 				return -EINVAL;
2193 
2194 			r = amdgpu_atombios_init(adev);
2195 			if (r) {
2196 				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2197 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2198 				return r;
2199 			}
2200 
2201 			/*get pf2vf msg info at it's earliest time*/
2202 			if (amdgpu_sriov_vf(adev))
2203 				amdgpu_virt_init_data_exchange(adev);
2204 
2205 		}
2206 	}
2207 
2208 	adev->cg_flags &= amdgpu_cg_mask;
2209 	adev->pg_flags &= amdgpu_pg_mask;
2210 
2211 	return 0;
2212 }
2213 
2214 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2215 {
2216 	int i, r;
2217 
2218 	for (i = 0; i < adev->num_ip_blocks; i++) {
2219 		if (!adev->ip_blocks[i].status.sw)
2220 			continue;
2221 		if (adev->ip_blocks[i].status.hw)
2222 			continue;
2223 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2224 		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2225 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2226 			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2227 			if (r) {
2228 				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2229 					  adev->ip_blocks[i].version->funcs->name, r);
2230 				return r;
2231 			}
2232 			adev->ip_blocks[i].status.hw = true;
2233 		}
2234 	}
2235 
2236 	return 0;
2237 }
2238 
2239 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2240 {
2241 	int i, r;
2242 
2243 	for (i = 0; i < adev->num_ip_blocks; i++) {
2244 		if (!adev->ip_blocks[i].status.sw)
2245 			continue;
2246 		if (adev->ip_blocks[i].status.hw)
2247 			continue;
2248 		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2249 		if (r) {
2250 			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2251 				  adev->ip_blocks[i].version->funcs->name, r);
2252 			return r;
2253 		}
2254 		adev->ip_blocks[i].status.hw = true;
2255 	}
2256 
2257 	return 0;
2258 }
2259 
2260 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2261 {
2262 	int r = 0;
2263 	int i;
2264 	uint32_t smu_version;
2265 
2266 	if (adev->asic_type >= CHIP_VEGA10) {
2267 		for (i = 0; i < adev->num_ip_blocks; i++) {
2268 			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2269 				continue;
2270 
2271 			if (!adev->ip_blocks[i].status.sw)
2272 				continue;
2273 
2274 			/* no need to do the fw loading again if already done*/
2275 			if (adev->ip_blocks[i].status.hw == true)
2276 				break;
2277 
2278 			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2279 				r = adev->ip_blocks[i].version->funcs->resume(adev);
2280 				if (r) {
2281 					DRM_ERROR("resume of IP block <%s> failed %d\n",
2282 							  adev->ip_blocks[i].version->funcs->name, r);
2283 					return r;
2284 				}
2285 			} else {
2286 				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2287 				if (r) {
2288 					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2289 							  adev->ip_blocks[i].version->funcs->name, r);
2290 					return r;
2291 				}
2292 			}
2293 
2294 			adev->ip_blocks[i].status.hw = true;
2295 			break;
2296 		}
2297 	}
2298 
2299 	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2300 		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2301 
2302 	return r;
2303 }
2304 
2305 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2306 {
2307 	long timeout;
2308 	int r, i;
2309 
2310 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2311 		struct amdgpu_ring *ring = adev->rings[i];
2312 
2313 		/* No need to setup the GPU scheduler for rings that don't need it */
2314 		if (!ring || ring->no_scheduler)
2315 			continue;
2316 
2317 		switch (ring->funcs->type) {
2318 		case AMDGPU_RING_TYPE_GFX:
2319 			timeout = adev->gfx_timeout;
2320 			break;
2321 		case AMDGPU_RING_TYPE_COMPUTE:
2322 			timeout = adev->compute_timeout;
2323 			break;
2324 		case AMDGPU_RING_TYPE_SDMA:
2325 			timeout = adev->sdma_timeout;
2326 			break;
2327 		default:
2328 			timeout = adev->video_timeout;
2329 			break;
2330 		}
2331 
2332 		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2333 				   ring->num_hw_submission, amdgpu_job_hang_limit,
2334 				   timeout, adev->reset_domain->wq,
2335 				   ring->sched_score, ring->name,
2336 				   adev->dev);
2337 		if (r) {
2338 			DRM_ERROR("Failed to create scheduler on ring %s.\n",
2339 				  ring->name);
2340 			return r;
2341 		}
2342 	}
2343 
2344 	return 0;
2345 }
2346 
2347 
2348 /**
2349  * amdgpu_device_ip_init - run init for hardware IPs
2350  *
2351  * @adev: amdgpu_device pointer
2352  *
2353  * Main initialization pass for hardware IPs.  The list of all the hardware
2354  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2355  * are run.  sw_init initializes the software state associated with each IP
2356  * and hw_init initializes the hardware associated with each IP.
2357  * Returns 0 on success, negative error code on failure.
2358  */
2359 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2360 {
2361 	int i, r;
2362 
2363 	r = amdgpu_ras_init(adev);
2364 	if (r)
2365 		return r;
2366 
2367 	for (i = 0; i < adev->num_ip_blocks; i++) {
2368 		if (!adev->ip_blocks[i].status.valid)
2369 			continue;
2370 		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2371 		if (r) {
2372 			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2373 				  adev->ip_blocks[i].version->funcs->name, r);
2374 			goto init_failed;
2375 		}
2376 		adev->ip_blocks[i].status.sw = true;
2377 
2378 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2379 			/* need to do common hw init early so everything is set up for gmc */
2380 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2381 			if (r) {
2382 				DRM_ERROR("hw_init %d failed %d\n", i, r);
2383 				goto init_failed;
2384 			}
2385 			adev->ip_blocks[i].status.hw = true;
2386 		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2387 			/* need to do gmc hw init early so we can allocate gpu mem */
2388 			/* Try to reserve bad pages early */
2389 			if (amdgpu_sriov_vf(adev))
2390 				amdgpu_virt_exchange_data(adev);
2391 
2392 			r = amdgpu_device_vram_scratch_init(adev);
2393 			if (r) {
2394 				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2395 				goto init_failed;
2396 			}
2397 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2398 			if (r) {
2399 				DRM_ERROR("hw_init %d failed %d\n", i, r);
2400 				goto init_failed;
2401 			}
2402 			r = amdgpu_device_wb_init(adev);
2403 			if (r) {
2404 				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2405 				goto init_failed;
2406 			}
2407 			adev->ip_blocks[i].status.hw = true;
2408 
2409 			/* right after GMC hw init, we create CSA */
2410 			if (amdgpu_mcbp) {
2411 				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2412 								AMDGPU_GEM_DOMAIN_VRAM,
2413 								AMDGPU_CSA_SIZE);
2414 				if (r) {
2415 					DRM_ERROR("allocate CSA failed %d\n", r);
2416 					goto init_failed;
2417 				}
2418 			}
2419 		}
2420 	}
2421 
2422 	if (amdgpu_sriov_vf(adev))
2423 		amdgpu_virt_init_data_exchange(adev);
2424 
2425 	r = amdgpu_ib_pool_init(adev);
2426 	if (r) {
2427 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2428 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2429 		goto init_failed;
2430 	}
2431 
2432 	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2433 	if (r)
2434 		goto init_failed;
2435 
2436 	r = amdgpu_device_ip_hw_init_phase1(adev);
2437 	if (r)
2438 		goto init_failed;
2439 
2440 	r = amdgpu_device_fw_loading(adev);
2441 	if (r)
2442 		goto init_failed;
2443 
2444 	r = amdgpu_device_ip_hw_init_phase2(adev);
2445 	if (r)
2446 		goto init_failed;
2447 
2448 	/*
2449 	 * retired pages will be loaded from eeprom and reserved here,
2450 	 * it should be called after amdgpu_device_ip_hw_init_phase2  since
2451 	 * for some ASICs the RAS EEPROM code relies on SMU fully functioning
2452 	 * for I2C communication which only true at this point.
2453 	 *
2454 	 * amdgpu_ras_recovery_init may fail, but the upper only cares the
2455 	 * failure from bad gpu situation and stop amdgpu init process
2456 	 * accordingly. For other failed cases, it will still release all
2457 	 * the resource and print error message, rather than returning one
2458 	 * negative value to upper level.
2459 	 *
2460 	 * Note: theoretically, this should be called before all vram allocations
2461 	 * to protect retired page from abusing
2462 	 */
2463 	r = amdgpu_ras_recovery_init(adev);
2464 	if (r)
2465 		goto init_failed;
2466 
2467 	/**
2468 	 * In case of XGMI grab extra reference for reset domain for this device
2469 	 */
2470 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2471 		if (amdgpu_xgmi_add_device(adev) == 0) {
2472 			if (!amdgpu_sriov_vf(adev)) {
2473 				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2474 
2475 				if (!hive->reset_domain ||
2476 				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2477 					r = -ENOENT;
2478 					amdgpu_put_xgmi_hive(hive);
2479 					goto init_failed;
2480 				}
2481 
2482 				/* Drop the early temporary reset domain we created for device */
2483 				amdgpu_reset_put_reset_domain(adev->reset_domain);
2484 				adev->reset_domain = hive->reset_domain;
2485 				amdgpu_put_xgmi_hive(hive);
2486 			}
2487 		}
2488 	}
2489 
2490 	r = amdgpu_device_init_schedulers(adev);
2491 	if (r)
2492 		goto init_failed;
2493 
2494 	/* Don't init kfd if whole hive need to be reset during init */
2495 	if (!adev->gmc.xgmi.pending_reset)
2496 		amdgpu_amdkfd_device_init(adev);
2497 
2498 	amdgpu_fru_get_product_info(adev);
2499 
2500 init_failed:
2501 	if (amdgpu_sriov_vf(adev))
2502 		amdgpu_virt_release_full_gpu(adev, true);
2503 
2504 	return r;
2505 }
2506 
2507 /**
2508  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2509  *
2510  * @adev: amdgpu_device pointer
2511  *
2512  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2513  * this function before a GPU reset.  If the value is retained after a
2514  * GPU reset, VRAM has not been lost.  Some GPU resets may destry VRAM contents.
2515  */
2516 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2517 {
2518 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2519 }
2520 
2521 /**
2522  * amdgpu_device_check_vram_lost - check if vram is valid
2523  *
2524  * @adev: amdgpu_device pointer
2525  *
2526  * Checks the reset magic value written to the gart pointer in VRAM.
2527  * The driver calls this after a GPU reset to see if the contents of
2528  * VRAM is lost or now.
2529  * returns true if vram is lost, false if not.
2530  */
2531 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2532 {
2533 	if (memcmp(adev->gart.ptr, adev->reset_magic,
2534 			AMDGPU_RESET_MAGIC_NUM))
2535 		return true;
2536 
2537 	if (!amdgpu_in_reset(adev))
2538 		return false;
2539 
2540 	/*
2541 	 * For all ASICs with baco/mode1 reset, the VRAM is
2542 	 * always assumed to be lost.
2543 	 */
2544 	switch (amdgpu_asic_reset_method(adev)) {
2545 	case AMD_RESET_METHOD_BACO:
2546 	case AMD_RESET_METHOD_MODE1:
2547 		return true;
2548 	default:
2549 		return false;
2550 	}
2551 }
2552 
2553 /**
2554  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2555  *
2556  * @adev: amdgpu_device pointer
2557  * @state: clockgating state (gate or ungate)
2558  *
2559  * The list of all the hardware IPs that make up the asic is walked and the
2560  * set_clockgating_state callbacks are run.
2561  * Late initialization pass enabling clockgating for hardware IPs.
2562  * Fini or suspend, pass disabling clockgating for hardware IPs.
2563  * Returns 0 on success, negative error code on failure.
2564  */
2565 
2566 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2567 			       enum amd_clockgating_state state)
2568 {
2569 	int i, j, r;
2570 
2571 	if (amdgpu_emu_mode == 1)
2572 		return 0;
2573 
2574 	for (j = 0; j < adev->num_ip_blocks; j++) {
2575 		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2576 		if (!adev->ip_blocks[i].status.late_initialized)
2577 			continue;
2578 		/* skip CG for GFX on S0ix */
2579 		if (adev->in_s0ix &&
2580 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2581 			continue;
2582 		/* skip CG for VCE/UVD, it's handled specially */
2583 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2584 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2585 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2586 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2587 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2588 			/* enable clockgating to save power */
2589 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2590 										     state);
2591 			if (r) {
2592 				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2593 					  adev->ip_blocks[i].version->funcs->name, r);
2594 				return r;
2595 			}
2596 		}
2597 	}
2598 
2599 	return 0;
2600 }
2601 
2602 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2603 			       enum amd_powergating_state state)
2604 {
2605 	int i, j, r;
2606 
2607 	if (amdgpu_emu_mode == 1)
2608 		return 0;
2609 
2610 	for (j = 0; j < adev->num_ip_blocks; j++) {
2611 		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2612 		if (!adev->ip_blocks[i].status.late_initialized)
2613 			continue;
2614 		/* skip PG for GFX on S0ix */
2615 		if (adev->in_s0ix &&
2616 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2617 			continue;
2618 		/* skip CG for VCE/UVD, it's handled specially */
2619 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2620 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2621 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2622 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2623 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2624 			/* enable powergating to save power */
2625 			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2626 											state);
2627 			if (r) {
2628 				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2629 					  adev->ip_blocks[i].version->funcs->name, r);
2630 				return r;
2631 			}
2632 		}
2633 	}
2634 	return 0;
2635 }
2636 
2637 static int amdgpu_device_enable_mgpu_fan_boost(void)
2638 {
2639 	struct amdgpu_gpu_instance *gpu_ins;
2640 	struct amdgpu_device *adev;
2641 	int i, ret = 0;
2642 
2643 	mutex_lock(&mgpu_info.mutex);
2644 
2645 	/*
2646 	 * MGPU fan boost feature should be enabled
2647 	 * only when there are two or more dGPUs in
2648 	 * the system
2649 	 */
2650 	if (mgpu_info.num_dgpu < 2)
2651 		goto out;
2652 
2653 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2654 		gpu_ins = &(mgpu_info.gpu_ins[i]);
2655 		adev = gpu_ins->adev;
2656 		if (!(adev->flags & AMD_IS_APU) &&
2657 		    !gpu_ins->mgpu_fan_enabled) {
2658 			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2659 			if (ret)
2660 				break;
2661 
2662 			gpu_ins->mgpu_fan_enabled = 1;
2663 		}
2664 	}
2665 
2666 out:
2667 	mutex_unlock(&mgpu_info.mutex);
2668 
2669 	return ret;
2670 }
2671 
2672 /**
2673  * amdgpu_device_ip_late_init - run late init for hardware IPs
2674  *
2675  * @adev: amdgpu_device pointer
2676  *
2677  * Late initialization pass for hardware IPs.  The list of all the hardware
2678  * IPs that make up the asic is walked and the late_init callbacks are run.
2679  * late_init covers any special initialization that an IP requires
2680  * after all of the have been initialized or something that needs to happen
2681  * late in the init process.
2682  * Returns 0 on success, negative error code on failure.
2683  */
2684 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2685 {
2686 	struct amdgpu_gpu_instance *gpu_instance;
2687 	int i = 0, r;
2688 
2689 	for (i = 0; i < adev->num_ip_blocks; i++) {
2690 		if (!adev->ip_blocks[i].status.hw)
2691 			continue;
2692 		if (adev->ip_blocks[i].version->funcs->late_init) {
2693 			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2694 			if (r) {
2695 				DRM_ERROR("late_init of IP block <%s> failed %d\n",
2696 					  adev->ip_blocks[i].version->funcs->name, r);
2697 				return r;
2698 			}
2699 		}
2700 		adev->ip_blocks[i].status.late_initialized = true;
2701 	}
2702 
2703 	r = amdgpu_ras_late_init(adev);
2704 	if (r) {
2705 		DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2706 		return r;
2707 	}
2708 
2709 	amdgpu_ras_set_error_query_ready(adev, true);
2710 
2711 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2712 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2713 
2714 	amdgpu_device_fill_reset_magic(adev);
2715 
2716 	r = amdgpu_device_enable_mgpu_fan_boost();
2717 	if (r)
2718 		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2719 
2720 	/* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
2721 	if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1)||
2722 			       adev->asic_type == CHIP_ALDEBARAN ))
2723 		amdgpu_dpm_handle_passthrough_sbr(adev, true);
2724 
2725 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2726 		mutex_lock(&mgpu_info.mutex);
2727 
2728 		/*
2729 		 * Reset device p-state to low as this was booted with high.
2730 		 *
2731 		 * This should be performed only after all devices from the same
2732 		 * hive get initialized.
2733 		 *
2734 		 * However, it's unknown how many device in the hive in advance.
2735 		 * As this is counted one by one during devices initializations.
2736 		 *
2737 		 * So, we wait for all XGMI interlinked devices initialized.
2738 		 * This may bring some delays as those devices may come from
2739 		 * different hives. But that should be OK.
2740 		 */
2741 		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2742 			for (i = 0; i < mgpu_info.num_gpu; i++) {
2743 				gpu_instance = &(mgpu_info.gpu_ins[i]);
2744 				if (gpu_instance->adev->flags & AMD_IS_APU)
2745 					continue;
2746 
2747 				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2748 						AMDGPU_XGMI_PSTATE_MIN);
2749 				if (r) {
2750 					DRM_ERROR("pstate setting failed (%d).\n", r);
2751 					break;
2752 				}
2753 			}
2754 		}
2755 
2756 		mutex_unlock(&mgpu_info.mutex);
2757 	}
2758 
2759 	return 0;
2760 }
2761 
2762 /**
2763  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2764  *
2765  * @adev: amdgpu_device pointer
2766  *
2767  * For ASICs need to disable SMC first
2768  */
2769 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2770 {
2771 	int i, r;
2772 
2773 	if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2774 		return;
2775 
2776 	for (i = 0; i < adev->num_ip_blocks; i++) {
2777 		if (!adev->ip_blocks[i].status.hw)
2778 			continue;
2779 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2780 			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2781 			/* XXX handle errors */
2782 			if (r) {
2783 				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2784 					  adev->ip_blocks[i].version->funcs->name, r);
2785 			}
2786 			adev->ip_blocks[i].status.hw = false;
2787 			break;
2788 		}
2789 	}
2790 }
2791 
2792 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2793 {
2794 	int i, r;
2795 
2796 	for (i = 0; i < adev->num_ip_blocks; i++) {
2797 		if (!adev->ip_blocks[i].version->funcs->early_fini)
2798 			continue;
2799 
2800 		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2801 		if (r) {
2802 			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2803 				  adev->ip_blocks[i].version->funcs->name, r);
2804 		}
2805 	}
2806 
2807 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2808 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2809 
2810 	amdgpu_amdkfd_suspend(adev, false);
2811 
2812 	/* Workaroud for ASICs need to disable SMC first */
2813 	amdgpu_device_smu_fini_early(adev);
2814 
2815 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2816 		if (!adev->ip_blocks[i].status.hw)
2817 			continue;
2818 
2819 		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2820 		/* XXX handle errors */
2821 		if (r) {
2822 			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2823 				  adev->ip_blocks[i].version->funcs->name, r);
2824 		}
2825 
2826 		adev->ip_blocks[i].status.hw = false;
2827 	}
2828 
2829 	if (amdgpu_sriov_vf(adev)) {
2830 		if (amdgpu_virt_release_full_gpu(adev, false))
2831 			DRM_ERROR("failed to release exclusive mode on fini\n");
2832 	}
2833 
2834 	return 0;
2835 }
2836 
2837 /**
2838  * amdgpu_device_ip_fini - run fini for hardware IPs
2839  *
2840  * @adev: amdgpu_device pointer
2841  *
2842  * Main teardown pass for hardware IPs.  The list of all the hardware
2843  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2844  * are run.  hw_fini tears down the hardware associated with each IP
2845  * and sw_fini tears down any software state associated with each IP.
2846  * Returns 0 on success, negative error code on failure.
2847  */
2848 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2849 {
2850 	int i, r;
2851 
2852 	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2853 		amdgpu_virt_release_ras_err_handler_data(adev);
2854 
2855 	if (adev->gmc.xgmi.num_physical_nodes > 1)
2856 		amdgpu_xgmi_remove_device(adev);
2857 
2858 	amdgpu_amdkfd_device_fini_sw(adev);
2859 
2860 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2861 		if (!adev->ip_blocks[i].status.sw)
2862 			continue;
2863 
2864 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2865 			amdgpu_ucode_free_bo(adev);
2866 			amdgpu_free_static_csa(&adev->virt.csa_obj);
2867 			amdgpu_device_wb_fini(adev);
2868 			amdgpu_device_vram_scratch_fini(adev);
2869 			amdgpu_ib_pool_fini(adev);
2870 		}
2871 
2872 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2873 		/* XXX handle errors */
2874 		if (r) {
2875 			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2876 				  adev->ip_blocks[i].version->funcs->name, r);
2877 		}
2878 		adev->ip_blocks[i].status.sw = false;
2879 		adev->ip_blocks[i].status.valid = false;
2880 	}
2881 
2882 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2883 		if (!adev->ip_blocks[i].status.late_initialized)
2884 			continue;
2885 		if (adev->ip_blocks[i].version->funcs->late_fini)
2886 			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2887 		adev->ip_blocks[i].status.late_initialized = false;
2888 	}
2889 
2890 	amdgpu_ras_fini(adev);
2891 
2892 	return 0;
2893 }
2894 
2895 /**
2896  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2897  *
2898  * @work: work_struct.
2899  */
2900 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2901 {
2902 	struct amdgpu_device *adev =
2903 		container_of(work, struct amdgpu_device, delayed_init_work.work);
2904 	int r;
2905 
2906 	r = amdgpu_ib_ring_tests(adev);
2907 	if (r)
2908 		DRM_ERROR("ib ring test failed (%d).\n", r);
2909 }
2910 
2911 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2912 {
2913 	struct amdgpu_device *adev =
2914 		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2915 
2916 	WARN_ON_ONCE(adev->gfx.gfx_off_state);
2917 	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2918 
2919 	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2920 		adev->gfx.gfx_off_state = true;
2921 }
2922 
2923 /**
2924  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2925  *
2926  * @adev: amdgpu_device pointer
2927  *
2928  * Main suspend function for hardware IPs.  The list of all the hardware
2929  * IPs that make up the asic is walked, clockgating is disabled and the
2930  * suspend callbacks are run.  suspend puts the hardware and software state
2931  * in each IP into a state suitable for suspend.
2932  * Returns 0 on success, negative error code on failure.
2933  */
2934 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2935 {
2936 	int i, r;
2937 
2938 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2939 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2940 
2941 	/*
2942 	 * Per PMFW team's suggestion, driver needs to handle gfxoff
2943 	 * and df cstate features disablement for gpu reset(e.g. Mode1Reset)
2944 	 * scenario. Add the missing df cstate disablement here.
2945 	 */
2946 	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2947 		dev_warn(adev->dev, "Failed to disallow df cstate");
2948 
2949 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2950 		if (!adev->ip_blocks[i].status.valid)
2951 			continue;
2952 
2953 		/* displays are handled separately */
2954 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2955 			continue;
2956 
2957 		/* XXX handle errors */
2958 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2959 		/* XXX handle errors */
2960 		if (r) {
2961 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2962 				  adev->ip_blocks[i].version->funcs->name, r);
2963 			return r;
2964 		}
2965 
2966 		adev->ip_blocks[i].status.hw = false;
2967 	}
2968 
2969 	return 0;
2970 }
2971 
2972 /**
2973  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2974  *
2975  * @adev: amdgpu_device pointer
2976  *
2977  * Main suspend function for hardware IPs.  The list of all the hardware
2978  * IPs that make up the asic is walked, clockgating is disabled and the
2979  * suspend callbacks are run.  suspend puts the hardware and software state
2980  * in each IP into a state suitable for suspend.
2981  * Returns 0 on success, negative error code on failure.
2982  */
2983 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2984 {
2985 	int i, r;
2986 
2987 	if (adev->in_s0ix)
2988 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2989 
2990 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2991 		if (!adev->ip_blocks[i].status.valid)
2992 			continue;
2993 		/* displays are handled in phase1 */
2994 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2995 			continue;
2996 		/* PSP lost connection when err_event_athub occurs */
2997 		if (amdgpu_ras_intr_triggered() &&
2998 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2999 			adev->ip_blocks[i].status.hw = false;
3000 			continue;
3001 		}
3002 
3003 		/* skip unnecessary suspend if we do not initialize them yet */
3004 		if (adev->gmc.xgmi.pending_reset &&
3005 		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3006 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3007 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3008 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3009 			adev->ip_blocks[i].status.hw = false;
3010 			continue;
3011 		}
3012 
3013 		/* skip suspend of gfx and psp for S0ix
3014 		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3015 		 * like at runtime. PSP is also part of the always on hardware
3016 		 * so no need to suspend it.
3017 		 */
3018 		if (adev->in_s0ix &&
3019 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3020 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
3021 			continue;
3022 
3023 		/* XXX handle errors */
3024 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
3025 		/* XXX handle errors */
3026 		if (r) {
3027 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3028 				  adev->ip_blocks[i].version->funcs->name, r);
3029 		}
3030 		adev->ip_blocks[i].status.hw = false;
3031 		/* handle putting the SMC in the appropriate state */
3032 		if(!amdgpu_sriov_vf(adev)){
3033 			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3034 				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3035 				if (r) {
3036 					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3037 							adev->mp1_state, r);
3038 					return r;
3039 				}
3040 			}
3041 		}
3042 	}
3043 
3044 	return 0;
3045 }
3046 
3047 /**
3048  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3049  *
3050  * @adev: amdgpu_device pointer
3051  *
3052  * Main suspend function for hardware IPs.  The list of all the hardware
3053  * IPs that make up the asic is walked, clockgating is disabled and the
3054  * suspend callbacks are run.  suspend puts the hardware and software state
3055  * in each IP into a state suitable for suspend.
3056  * Returns 0 on success, negative error code on failure.
3057  */
3058 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3059 {
3060 	int r;
3061 
3062 	if (amdgpu_sriov_vf(adev)) {
3063 		amdgpu_virt_fini_data_exchange(adev);
3064 		amdgpu_virt_request_full_gpu(adev, false);
3065 	}
3066 
3067 	r = amdgpu_device_ip_suspend_phase1(adev);
3068 	if (r)
3069 		return r;
3070 	r = amdgpu_device_ip_suspend_phase2(adev);
3071 
3072 	if (amdgpu_sriov_vf(adev))
3073 		amdgpu_virt_release_full_gpu(adev, false);
3074 
3075 	return r;
3076 }
3077 
3078 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3079 {
3080 	int i, r;
3081 
3082 	static enum amd_ip_block_type ip_order[] = {
3083 		AMD_IP_BLOCK_TYPE_COMMON,
3084 		AMD_IP_BLOCK_TYPE_GMC,
3085 		AMD_IP_BLOCK_TYPE_PSP,
3086 		AMD_IP_BLOCK_TYPE_IH,
3087 	};
3088 
3089 	for (i = 0; i < adev->num_ip_blocks; i++) {
3090 		int j;
3091 		struct amdgpu_ip_block *block;
3092 
3093 		block = &adev->ip_blocks[i];
3094 		block->status.hw = false;
3095 
3096 		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3097 
3098 			if (block->version->type != ip_order[j] ||
3099 				!block->status.valid)
3100 				continue;
3101 
3102 			r = block->version->funcs->hw_init(adev);
3103 			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3104 			if (r)
3105 				return r;
3106 			block->status.hw = true;
3107 		}
3108 	}
3109 
3110 	return 0;
3111 }
3112 
3113 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3114 {
3115 	int i, r;
3116 
3117 	static enum amd_ip_block_type ip_order[] = {
3118 		AMD_IP_BLOCK_TYPE_SMC,
3119 		AMD_IP_BLOCK_TYPE_DCE,
3120 		AMD_IP_BLOCK_TYPE_GFX,
3121 		AMD_IP_BLOCK_TYPE_SDMA,
3122 		AMD_IP_BLOCK_TYPE_UVD,
3123 		AMD_IP_BLOCK_TYPE_VCE,
3124 		AMD_IP_BLOCK_TYPE_VCN
3125 	};
3126 
3127 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3128 		int j;
3129 		struct amdgpu_ip_block *block;
3130 
3131 		for (j = 0; j < adev->num_ip_blocks; j++) {
3132 			block = &adev->ip_blocks[j];
3133 
3134 			if (block->version->type != ip_order[i] ||
3135 				!block->status.valid ||
3136 				block->status.hw)
3137 				continue;
3138 
3139 			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3140 				r = block->version->funcs->resume(adev);
3141 			else
3142 				r = block->version->funcs->hw_init(adev);
3143 
3144 			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3145 			if (r)
3146 				return r;
3147 			block->status.hw = true;
3148 		}
3149 	}
3150 
3151 	return 0;
3152 }
3153 
3154 /**
3155  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3156  *
3157  * @adev: amdgpu_device pointer
3158  *
3159  * First resume function for hardware IPs.  The list of all the hardware
3160  * IPs that make up the asic is walked and the resume callbacks are run for
3161  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3162  * after a suspend and updates the software state as necessary.  This
3163  * function is also used for restoring the GPU after a GPU reset.
3164  * Returns 0 on success, negative error code on failure.
3165  */
3166 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3167 {
3168 	int i, r;
3169 
3170 	for (i = 0; i < adev->num_ip_blocks; i++) {
3171 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3172 			continue;
3173 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3174 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3175 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3176 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3177 
3178 			r = adev->ip_blocks[i].version->funcs->resume(adev);
3179 			if (r) {
3180 				DRM_ERROR("resume of IP block <%s> failed %d\n",
3181 					  adev->ip_blocks[i].version->funcs->name, r);
3182 				return r;
3183 			}
3184 			adev->ip_blocks[i].status.hw = true;
3185 		}
3186 	}
3187 
3188 	return 0;
3189 }
3190 
3191 /**
3192  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3193  *
3194  * @adev: amdgpu_device pointer
3195  *
3196  * First resume function for hardware IPs.  The list of all the hardware
3197  * IPs that make up the asic is walked and the resume callbacks are run for
3198  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3199  * functional state after a suspend and updates the software state as
3200  * necessary.  This function is also used for restoring the GPU after a GPU
3201  * reset.
3202  * Returns 0 on success, negative error code on failure.
3203  */
3204 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3205 {
3206 	int i, r;
3207 
3208 	for (i = 0; i < adev->num_ip_blocks; i++) {
3209 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3210 			continue;
3211 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3212 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3213 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3214 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3215 			continue;
3216 		r = adev->ip_blocks[i].version->funcs->resume(adev);
3217 		if (r) {
3218 			DRM_ERROR("resume of IP block <%s> failed %d\n",
3219 				  adev->ip_blocks[i].version->funcs->name, r);
3220 			return r;
3221 		}
3222 		adev->ip_blocks[i].status.hw = true;
3223 
3224 		if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3225 			/* disable gfxoff for IP resume. The gfxoff will be re-enabled in
3226 			 * amdgpu_device_resume() after IP resume.
3227 			 */
3228 			amdgpu_gfx_off_ctrl(adev, false);
3229 			DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
3230 		}
3231 
3232 	}
3233 
3234 	return 0;
3235 }
3236 
3237 /**
3238  * amdgpu_device_ip_resume - run resume for hardware IPs
3239  *
3240  * @adev: amdgpu_device pointer
3241  *
3242  * Main resume function for hardware IPs.  The hardware IPs
3243  * are split into two resume functions because they are
3244  * are also used in in recovering from a GPU reset and some additional
3245  * steps need to be take between them.  In this case (S3/S4) they are
3246  * run sequentially.
3247  * Returns 0 on success, negative error code on failure.
3248  */
3249 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3250 {
3251 	int r;
3252 
3253 	r = amdgpu_amdkfd_resume_iommu(adev);
3254 	if (r)
3255 		return r;
3256 
3257 	r = amdgpu_device_ip_resume_phase1(adev);
3258 	if (r)
3259 		return r;
3260 
3261 	r = amdgpu_device_fw_loading(adev);
3262 	if (r)
3263 		return r;
3264 
3265 	r = amdgpu_device_ip_resume_phase2(adev);
3266 
3267 	return r;
3268 }
3269 
3270 /**
3271  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3272  *
3273  * @adev: amdgpu_device pointer
3274  *
3275  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3276  */
3277 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3278 {
3279 	if (amdgpu_sriov_vf(adev)) {
3280 		if (adev->is_atom_fw) {
3281 			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3282 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3283 		} else {
3284 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3285 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3286 		}
3287 
3288 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3289 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3290 	}
3291 }
3292 
3293 /**
3294  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3295  *
3296  * @asic_type: AMD asic type
3297  *
3298  * Check if there is DC (new modesetting infrastructre) support for an asic.
3299  * returns true if DC has support, false if not.
3300  */
3301 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3302 {
3303 	switch (asic_type) {
3304 #ifdef CONFIG_DRM_AMDGPU_SI
3305 	case CHIP_HAINAN:
3306 #endif
3307 	case CHIP_TOPAZ:
3308 		/* chips with no display hardware */
3309 		return false;
3310 #if defined(CONFIG_DRM_AMD_DC)
3311 	case CHIP_TAHITI:
3312 	case CHIP_PITCAIRN:
3313 	case CHIP_VERDE:
3314 	case CHIP_OLAND:
3315 		/*
3316 		 * We have systems in the wild with these ASICs that require
3317 		 * LVDS and VGA support which is not supported with DC.
3318 		 *
3319 		 * Fallback to the non-DC driver here by default so as not to
3320 		 * cause regressions.
3321 		 */
3322 #if defined(CONFIG_DRM_AMD_DC_SI)
3323 		return amdgpu_dc > 0;
3324 #else
3325 		return false;
3326 #endif
3327 	case CHIP_BONAIRE:
3328 	case CHIP_KAVERI:
3329 	case CHIP_KABINI:
3330 	case CHIP_MULLINS:
3331 		/*
3332 		 * We have systems in the wild with these ASICs that require
3333 		 * VGA support which is not supported with DC.
3334 		 *
3335 		 * Fallback to the non-DC driver here by default so as not to
3336 		 * cause regressions.
3337 		 */
3338 		return amdgpu_dc > 0;
3339 	default:
3340 		return amdgpu_dc != 0;
3341 #else
3342 	default:
3343 		if (amdgpu_dc > 0)
3344 			DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3345 					 "but isn't supported by ASIC, ignoring\n");
3346 		return false;
3347 #endif
3348 	}
3349 }
3350 
3351 /**
3352  * amdgpu_device_has_dc_support - check if dc is supported
3353  *
3354  * @adev: amdgpu_device pointer
3355  *
3356  * Returns true for supported, false for not supported
3357  */
3358 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3359 {
3360 	if (adev->enable_virtual_display ||
3361 	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3362 		return false;
3363 
3364 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3365 }
3366 
3367 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3368 {
3369 	struct amdgpu_device *adev =
3370 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3371 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3372 
3373 	/* It's a bug to not have a hive within this function */
3374 	if (WARN_ON(!hive))
3375 		return;
3376 
3377 	/*
3378 	 * Use task barrier to synchronize all xgmi reset works across the
3379 	 * hive. task_barrier_enter and task_barrier_exit will block
3380 	 * until all the threads running the xgmi reset works reach
3381 	 * those points. task_barrier_full will do both blocks.
3382 	 */
3383 	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3384 
3385 		task_barrier_enter(&hive->tb);
3386 		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3387 
3388 		if (adev->asic_reset_res)
3389 			goto fail;
3390 
3391 		task_barrier_exit(&hive->tb);
3392 		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3393 
3394 		if (adev->asic_reset_res)
3395 			goto fail;
3396 
3397 		if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3398 		    adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3399 			adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3400 	} else {
3401 
3402 		task_barrier_full(&hive->tb);
3403 		adev->asic_reset_res =  amdgpu_asic_reset(adev);
3404 	}
3405 
3406 fail:
3407 	if (adev->asic_reset_res)
3408 		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3409 			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3410 	amdgpu_put_xgmi_hive(hive);
3411 }
3412 
3413 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3414 {
3415 	char *input = amdgpu_lockup_timeout;
3416 	char *timeout_setting = NULL;
3417 	int index = 0;
3418 	long timeout;
3419 	int ret = 0;
3420 
3421 	/*
3422 	 * By default timeout for non compute jobs is 10000
3423 	 * and 60000 for compute jobs.
3424 	 * In SR-IOV or passthrough mode, timeout for compute
3425 	 * jobs are 60000 by default.
3426 	 */
3427 	adev->gfx_timeout = msecs_to_jiffies(10000);
3428 	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3429 	if (amdgpu_sriov_vf(adev))
3430 		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3431 					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3432 	else
3433 		adev->compute_timeout =  msecs_to_jiffies(60000);
3434 
3435 	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3436 		while ((timeout_setting = strsep(&input, ",")) &&
3437 				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3438 			ret = kstrtol(timeout_setting, 0, &timeout);
3439 			if (ret)
3440 				return ret;
3441 
3442 			if (timeout == 0) {
3443 				index++;
3444 				continue;
3445 			} else if (timeout < 0) {
3446 				timeout = MAX_SCHEDULE_TIMEOUT;
3447 				dev_warn(adev->dev, "lockup timeout disabled");
3448 				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3449 			} else {
3450 				timeout = msecs_to_jiffies(timeout);
3451 			}
3452 
3453 			switch (index++) {
3454 			case 0:
3455 				adev->gfx_timeout = timeout;
3456 				break;
3457 			case 1:
3458 				adev->compute_timeout = timeout;
3459 				break;
3460 			case 2:
3461 				adev->sdma_timeout = timeout;
3462 				break;
3463 			case 3:
3464 				adev->video_timeout = timeout;
3465 				break;
3466 			default:
3467 				break;
3468 			}
3469 		}
3470 		/*
3471 		 * There is only one value specified and
3472 		 * it should apply to all non-compute jobs.
3473 		 */
3474 		if (index == 1) {
3475 			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3476 			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3477 				adev->compute_timeout = adev->gfx_timeout;
3478 		}
3479 	}
3480 
3481 	return ret;
3482 }
3483 
3484 /**
3485  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3486  *
3487  * @adev: amdgpu_device pointer
3488  *
3489  * RAM direct mapped to GPU if IOMMU is not enabled or is pass through mode
3490  */
3491 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3492 {
3493 	struct iommu_domain *domain;
3494 
3495 	domain = iommu_get_domain_for_dev(adev->dev);
3496 	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3497 		adev->ram_is_direct_mapped = true;
3498 }
3499 
3500 static const struct attribute *amdgpu_dev_attributes[] = {
3501 	&dev_attr_product_name.attr,
3502 	&dev_attr_product_number.attr,
3503 	&dev_attr_serial_number.attr,
3504 	&dev_attr_pcie_replay_count.attr,
3505 	NULL
3506 };
3507 
3508 /**
3509  * amdgpu_device_init - initialize the driver
3510  *
3511  * @adev: amdgpu_device pointer
3512  * @flags: driver flags
3513  *
3514  * Initializes the driver info and hw (all asics).
3515  * Returns 0 for success or an error on failure.
3516  * Called at driver startup.
3517  */
3518 int amdgpu_device_init(struct amdgpu_device *adev,
3519 		       uint32_t flags)
3520 {
3521 	struct drm_device *ddev = adev_to_drm(adev);
3522 	struct pci_dev *pdev = adev->pdev;
3523 	int r, i;
3524 	bool px = false;
3525 	u32 max_MBps;
3526 
3527 	adev->shutdown = false;
3528 	adev->flags = flags;
3529 
3530 	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3531 		adev->asic_type = amdgpu_force_asic_type;
3532 	else
3533 		adev->asic_type = flags & AMD_ASIC_MASK;
3534 
3535 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3536 	if (amdgpu_emu_mode == 1)
3537 		adev->usec_timeout *= 10;
3538 	adev->gmc.gart_size = 512 * 1024 * 1024;
3539 	adev->accel_working = false;
3540 	adev->num_rings = 0;
3541 	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3542 	adev->mman.buffer_funcs = NULL;
3543 	adev->mman.buffer_funcs_ring = NULL;
3544 	adev->vm_manager.vm_pte_funcs = NULL;
3545 	adev->vm_manager.vm_pte_num_scheds = 0;
3546 	adev->gmc.gmc_funcs = NULL;
3547 	adev->harvest_ip_mask = 0x0;
3548 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3549 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3550 
3551 	adev->smc_rreg = &amdgpu_invalid_rreg;
3552 	adev->smc_wreg = &amdgpu_invalid_wreg;
3553 	adev->pcie_rreg = &amdgpu_invalid_rreg;
3554 	adev->pcie_wreg = &amdgpu_invalid_wreg;
3555 	adev->pciep_rreg = &amdgpu_invalid_rreg;
3556 	adev->pciep_wreg = &amdgpu_invalid_wreg;
3557 	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3558 	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3559 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3560 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3561 	adev->didt_rreg = &amdgpu_invalid_rreg;
3562 	adev->didt_wreg = &amdgpu_invalid_wreg;
3563 	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3564 	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3565 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3566 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3567 
3568 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3569 		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3570 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3571 
3572 	/* mutex initialization are all done here so we
3573 	 * can recall function without having locking issues */
3574 	mutex_init(&adev->firmware.mutex);
3575 	mutex_init(&adev->pm.mutex);
3576 	mutex_init(&adev->gfx.gpu_clock_mutex);
3577 	mutex_init(&adev->srbm_mutex);
3578 	mutex_init(&adev->gfx.pipe_reserve_mutex);
3579 	mutex_init(&adev->gfx.gfx_off_mutex);
3580 	mutex_init(&adev->grbm_idx_mutex);
3581 	mutex_init(&adev->mn_lock);
3582 	mutex_init(&adev->virt.vf_errors.lock);
3583 	hash_init(adev->mn_hash);
3584 	mutex_init(&adev->psp.mutex);
3585 	mutex_init(&adev->notifier_lock);
3586 	mutex_init(&adev->pm.stable_pstate_ctx_lock);
3587 	mutex_init(&adev->benchmark_mutex);
3588 
3589 	amdgpu_device_init_apu_flags(adev);
3590 
3591 	r = amdgpu_device_check_arguments(adev);
3592 	if (r)
3593 		return r;
3594 
3595 	spin_lock_init(&adev->mmio_idx_lock);
3596 	spin_lock_init(&adev->smc_idx_lock);
3597 	spin_lock_init(&adev->pcie_idx_lock);
3598 	spin_lock_init(&adev->uvd_ctx_idx_lock);
3599 	spin_lock_init(&adev->didt_idx_lock);
3600 	spin_lock_init(&adev->gc_cac_idx_lock);
3601 	spin_lock_init(&adev->se_cac_idx_lock);
3602 	spin_lock_init(&adev->audio_endpt_idx_lock);
3603 	spin_lock_init(&adev->mm_stats.lock);
3604 
3605 	INIT_LIST_HEAD(&adev->shadow_list);
3606 	mutex_init(&adev->shadow_list_lock);
3607 
3608 	INIT_LIST_HEAD(&adev->reset_list);
3609 
3610 	INIT_LIST_HEAD(&adev->ras_list);
3611 
3612 	INIT_DELAYED_WORK(&adev->delayed_init_work,
3613 			  amdgpu_device_delayed_init_work_handler);
3614 	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3615 			  amdgpu_device_delay_enable_gfx_off);
3616 
3617 	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3618 
3619 	adev->gfx.gfx_off_req_count = 1;
3620 	adev->gfx.gfx_off_residency = 0;
3621 	adev->gfx.gfx_off_entrycount = 0;
3622 	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3623 
3624 	atomic_set(&adev->throttling_logging_enabled, 1);
3625 	/*
3626 	 * If throttling continues, logging will be performed every minute
3627 	 * to avoid log flooding. "-1" is subtracted since the thermal
3628 	 * throttling interrupt comes every second. Thus, the total logging
3629 	 * interval is 59 seconds(retelimited printk interval) + 1(waiting
3630 	 * for throttling interrupt) = 60 seconds.
3631 	 */
3632 	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3633 	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3634 
3635 	/* Registers mapping */
3636 	/* TODO: block userspace mapping of io register */
3637 	if (adev->asic_type >= CHIP_BONAIRE) {
3638 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3639 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3640 	} else {
3641 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3642 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3643 	}
3644 
3645 	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3646 		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3647 
3648 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3649 	if (adev->rmmio == NULL) {
3650 		return -ENOMEM;
3651 	}
3652 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3653 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3654 
3655 	amdgpu_device_get_pcie_info(adev);
3656 
3657 	if (amdgpu_mcbp)
3658 		DRM_INFO("MCBP is enabled\n");
3659 
3660 	/*
3661 	 * Reset domain needs to be present early, before XGMI hive discovered
3662 	 * (if any) and intitialized to use reset sem and in_gpu reset flag
3663 	 * early on during init and before calling to RREG32.
3664 	 */
3665 	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3666 	if (!adev->reset_domain)
3667 		return -ENOMEM;
3668 
3669 	/* detect hw virtualization here */
3670 	amdgpu_detect_virtualization(adev);
3671 
3672 	r = amdgpu_device_get_job_timeout_settings(adev);
3673 	if (r) {
3674 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3675 		return r;
3676 	}
3677 
3678 	/* early init functions */
3679 	r = amdgpu_device_ip_early_init(adev);
3680 	if (r)
3681 		return r;
3682 
3683 	/* Enable TMZ based on IP_VERSION */
3684 	amdgpu_gmc_tmz_set(adev);
3685 
3686 	amdgpu_gmc_noretry_set(adev);
3687 	/* Need to get xgmi info early to decide the reset behavior*/
3688 	if (adev->gmc.xgmi.supported) {
3689 		r = adev->gfxhub.funcs->get_xgmi_info(adev);
3690 		if (r)
3691 			return r;
3692 	}
3693 
3694 	/* enable PCIE atomic ops */
3695 	if (amdgpu_sriov_vf(adev))
3696 		adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3697 			adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3698 			(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3699 	else
3700 		adev->have_atomics_support =
3701 			!pci_enable_atomic_ops_to_root(adev->pdev,
3702 					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3703 					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3704 	if (!adev->have_atomics_support)
3705 		dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3706 
3707 	/* doorbell bar mapping and doorbell index init*/
3708 	amdgpu_device_doorbell_init(adev);
3709 
3710 	if (amdgpu_emu_mode == 1) {
3711 		/* post the asic on emulation mode */
3712 		emu_soc_asic_init(adev);
3713 		goto fence_driver_init;
3714 	}
3715 
3716 	amdgpu_reset_init(adev);
3717 
3718 	/* detect if we are with an SRIOV vbios */
3719 	amdgpu_device_detect_sriov_bios(adev);
3720 
3721 	/* check if we need to reset the asic
3722 	 *  E.g., driver was not cleanly unloaded previously, etc.
3723 	 */
3724 	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3725 		if (adev->gmc.xgmi.num_physical_nodes) {
3726 			dev_info(adev->dev, "Pending hive reset.\n");
3727 			adev->gmc.xgmi.pending_reset = true;
3728 			/* Only need to init necessary block for SMU to handle the reset */
3729 			for (i = 0; i < adev->num_ip_blocks; i++) {
3730 				if (!adev->ip_blocks[i].status.valid)
3731 					continue;
3732 				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3733 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3734 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3735 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3736 					DRM_DEBUG("IP %s disabled for hw_init.\n",
3737 						adev->ip_blocks[i].version->funcs->name);
3738 					adev->ip_blocks[i].status.hw = true;
3739 				}
3740 			}
3741 		} else {
3742 			r = amdgpu_asic_reset(adev);
3743 			if (r) {
3744 				dev_err(adev->dev, "asic reset on init failed\n");
3745 				goto failed;
3746 			}
3747 		}
3748 	}
3749 
3750 	pci_enable_pcie_error_reporting(adev->pdev);
3751 
3752 	/* Post card if necessary */
3753 	if (amdgpu_device_need_post(adev)) {
3754 		if (!adev->bios) {
3755 			dev_err(adev->dev, "no vBIOS found\n");
3756 			r = -EINVAL;
3757 			goto failed;
3758 		}
3759 		DRM_INFO("GPU posting now...\n");
3760 		r = amdgpu_device_asic_init(adev);
3761 		if (r) {
3762 			dev_err(adev->dev, "gpu post error!\n");
3763 			goto failed;
3764 		}
3765 	}
3766 
3767 	if (adev->is_atom_fw) {
3768 		/* Initialize clocks */
3769 		r = amdgpu_atomfirmware_get_clock_info(adev);
3770 		if (r) {
3771 			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3772 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3773 			goto failed;
3774 		}
3775 	} else {
3776 		/* Initialize clocks */
3777 		r = amdgpu_atombios_get_clock_info(adev);
3778 		if (r) {
3779 			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3780 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3781 			goto failed;
3782 		}
3783 		/* init i2c buses */
3784 		if (!amdgpu_device_has_dc_support(adev))
3785 			amdgpu_atombios_i2c_init(adev);
3786 	}
3787 
3788 fence_driver_init:
3789 	/* Fence driver */
3790 	r = amdgpu_fence_driver_sw_init(adev);
3791 	if (r) {
3792 		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3793 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3794 		goto failed;
3795 	}
3796 
3797 	/* init the mode config */
3798 	drm_mode_config_init(adev_to_drm(adev));
3799 
3800 	r = amdgpu_device_ip_init(adev);
3801 	if (r) {
3802 		/* failed in exclusive mode due to timeout */
3803 		if (amdgpu_sriov_vf(adev) &&
3804 		    !amdgpu_sriov_runtime(adev) &&
3805 		    amdgpu_virt_mmio_blocked(adev) &&
3806 		    !amdgpu_virt_wait_reset(adev)) {
3807 			dev_err(adev->dev, "VF exclusive mode timeout\n");
3808 			/* Don't send request since VF is inactive. */
3809 			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3810 			adev->virt.ops = NULL;
3811 			r = -EAGAIN;
3812 			goto release_ras_con;
3813 		}
3814 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3815 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3816 		goto release_ras_con;
3817 	}
3818 
3819 	amdgpu_fence_driver_hw_init(adev);
3820 
3821 	dev_info(adev->dev,
3822 		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3823 			adev->gfx.config.max_shader_engines,
3824 			adev->gfx.config.max_sh_per_se,
3825 			adev->gfx.config.max_cu_per_sh,
3826 			adev->gfx.cu_info.number);
3827 
3828 	adev->accel_working = true;
3829 
3830 	amdgpu_vm_check_compute_bug(adev);
3831 
3832 	/* Initialize the buffer migration limit. */
3833 	if (amdgpu_moverate >= 0)
3834 		max_MBps = amdgpu_moverate;
3835 	else
3836 		max_MBps = 8; /* Allow 8 MB/s. */
3837 	/* Get a log2 for easy divisions. */
3838 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3839 
3840 	r = amdgpu_pm_sysfs_init(adev);
3841 	if (r) {
3842 		adev->pm_sysfs_en = false;
3843 		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3844 	} else
3845 		adev->pm_sysfs_en = true;
3846 
3847 	r = amdgpu_ucode_sysfs_init(adev);
3848 	if (r) {
3849 		adev->ucode_sysfs_en = false;
3850 		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3851 	} else
3852 		adev->ucode_sysfs_en = true;
3853 
3854 	r = amdgpu_psp_sysfs_init(adev);
3855 	if (r) {
3856 		adev->psp_sysfs_en = false;
3857 		if (!amdgpu_sriov_vf(adev))
3858 			DRM_ERROR("Creating psp sysfs failed\n");
3859 	} else
3860 		adev->psp_sysfs_en = true;
3861 
3862 	/*
3863 	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3864 	 * Otherwise the mgpu fan boost feature will be skipped due to the
3865 	 * gpu instance is counted less.
3866 	 */
3867 	amdgpu_register_gpu_instance(adev);
3868 
3869 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
3870 	 * explicit gating rather than handling it automatically.
3871 	 */
3872 	if (!adev->gmc.xgmi.pending_reset) {
3873 		r = amdgpu_device_ip_late_init(adev);
3874 		if (r) {
3875 			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3876 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3877 			goto release_ras_con;
3878 		}
3879 		/* must succeed. */
3880 		amdgpu_ras_resume(adev);
3881 		queue_delayed_work(system_wq, &adev->delayed_init_work,
3882 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3883 	}
3884 
3885 	if (amdgpu_sriov_vf(adev))
3886 		flush_delayed_work(&adev->delayed_init_work);
3887 
3888 	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3889 	if (r)
3890 		dev_err(adev->dev, "Could not create amdgpu device attr\n");
3891 
3892 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
3893 		r = amdgpu_pmu_init(adev);
3894 	if (r)
3895 		dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3896 
3897 	/* Have stored pci confspace at hand for restore in sudden PCI error */
3898 	if (amdgpu_device_cache_pci_state(adev->pdev))
3899 		pci_restore_state(pdev);
3900 
3901 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3902 	/* this will fail for cards that aren't VGA class devices, just
3903 	 * ignore it */
3904 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3905 		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3906 
3907 	if (amdgpu_device_supports_px(ddev)) {
3908 		px = true;
3909 		vga_switcheroo_register_client(adev->pdev,
3910 					       &amdgpu_switcheroo_ops, px);
3911 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3912 	}
3913 
3914 	if (adev->gmc.xgmi.pending_reset)
3915 		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3916 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3917 
3918 	amdgpu_device_check_iommu_direct_map(adev);
3919 
3920 	return 0;
3921 
3922 release_ras_con:
3923 	amdgpu_release_ras_context(adev);
3924 
3925 failed:
3926 	amdgpu_vf_error_trans_all(adev);
3927 
3928 	return r;
3929 }
3930 
3931 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3932 {
3933 
3934 	/* Clear all CPU mappings pointing to this device */
3935 	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3936 
3937 	/* Unmap all mapped bars - Doorbell, registers and VRAM */
3938 	amdgpu_device_doorbell_fini(adev);
3939 
3940 	iounmap(adev->rmmio);
3941 	adev->rmmio = NULL;
3942 	if (adev->mman.aper_base_kaddr)
3943 		iounmap(adev->mman.aper_base_kaddr);
3944 	adev->mman.aper_base_kaddr = NULL;
3945 
3946 	/* Memory manager related */
3947 	if (!adev->gmc.xgmi.connected_to_cpu) {
3948 		arch_phys_wc_del(adev->gmc.vram_mtrr);
3949 		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3950 	}
3951 }
3952 
3953 /**
3954  * amdgpu_device_fini_hw - tear down the driver
3955  *
3956  * @adev: amdgpu_device pointer
3957  *
3958  * Tear down the driver info (all asics).
3959  * Called at driver shutdown.
3960  */
3961 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3962 {
3963 	dev_info(adev->dev, "amdgpu: finishing device.\n");
3964 	flush_delayed_work(&adev->delayed_init_work);
3965 	adev->shutdown = true;
3966 
3967 	/* make sure IB test finished before entering exclusive mode
3968 	 * to avoid preemption on IB test
3969 	 * */
3970 	if (amdgpu_sriov_vf(adev)) {
3971 		amdgpu_virt_request_full_gpu(adev, false);
3972 		amdgpu_virt_fini_data_exchange(adev);
3973 	}
3974 
3975 	/* disable all interrupts */
3976 	amdgpu_irq_disable_all(adev);
3977 	if (adev->mode_info.mode_config_initialized){
3978 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3979 			drm_helper_force_disable_all(adev_to_drm(adev));
3980 		else
3981 			drm_atomic_helper_shutdown(adev_to_drm(adev));
3982 	}
3983 	amdgpu_fence_driver_hw_fini(adev);
3984 
3985 	if (adev->mman.initialized) {
3986 		flush_delayed_work(&adev->mman.bdev.wq);
3987 		ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3988 	}
3989 
3990 	if (adev->pm_sysfs_en)
3991 		amdgpu_pm_sysfs_fini(adev);
3992 	if (adev->ucode_sysfs_en)
3993 		amdgpu_ucode_sysfs_fini(adev);
3994 	if (adev->psp_sysfs_en)
3995 		amdgpu_psp_sysfs_fini(adev);
3996 	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3997 
3998 	/* disable ras feature must before hw fini */
3999 	amdgpu_ras_pre_fini(adev);
4000 
4001 	amdgpu_device_ip_fini_early(adev);
4002 
4003 	amdgpu_irq_fini_hw(adev);
4004 
4005 	if (adev->mman.initialized)
4006 		ttm_device_clear_dma_mappings(&adev->mman.bdev);
4007 
4008 	amdgpu_gart_dummy_page_fini(adev);
4009 
4010 	amdgpu_device_unmap_mmio(adev);
4011 
4012 }
4013 
4014 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4015 {
4016 	int idx;
4017 
4018 	amdgpu_fence_driver_sw_fini(adev);
4019 	amdgpu_device_ip_fini(adev);
4020 	release_firmware(adev->firmware.gpu_info_fw);
4021 	adev->firmware.gpu_info_fw = NULL;
4022 	adev->accel_working = false;
4023 	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4024 
4025 	amdgpu_reset_fini(adev);
4026 
4027 	/* free i2c buses */
4028 	if (!amdgpu_device_has_dc_support(adev))
4029 		amdgpu_i2c_fini(adev);
4030 
4031 	if (amdgpu_emu_mode != 1)
4032 		amdgpu_atombios_fini(adev);
4033 
4034 	kfree(adev->bios);
4035 	adev->bios = NULL;
4036 	if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4037 		vga_switcheroo_unregister_client(adev->pdev);
4038 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
4039 	}
4040 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4041 		vga_client_unregister(adev->pdev);
4042 
4043 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4044 
4045 		iounmap(adev->rmmio);
4046 		adev->rmmio = NULL;
4047 		amdgpu_device_doorbell_fini(adev);
4048 		drm_dev_exit(idx);
4049 	}
4050 
4051 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
4052 		amdgpu_pmu_fini(adev);
4053 	if (adev->mman.discovery_bin)
4054 		amdgpu_discovery_fini(adev);
4055 
4056 	amdgpu_reset_put_reset_domain(adev->reset_domain);
4057 	adev->reset_domain = NULL;
4058 
4059 	kfree(adev->pci_state);
4060 
4061 }
4062 
4063 /**
4064  * amdgpu_device_evict_resources - evict device resources
4065  * @adev: amdgpu device object
4066  *
4067  * Evicts all ttm device resources(vram BOs, gart table) from the lru list
4068  * of the vram memory type. Mainly used for evicting device resources
4069  * at suspend time.
4070  *
4071  */
4072 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4073 {
4074 	int ret;
4075 
4076 	/* No need to evict vram on APUs for suspend to ram or s2idle */
4077 	if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4078 		return 0;
4079 
4080 	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4081 	if (ret)
4082 		DRM_WARN("evicting device resources failed\n");
4083 	return ret;
4084 }
4085 
4086 /*
4087  * Suspend & resume.
4088  */
4089 /**
4090  * amdgpu_device_suspend - initiate device suspend
4091  *
4092  * @dev: drm dev pointer
4093  * @fbcon : notify the fbdev of suspend
4094  *
4095  * Puts the hw in the suspend state (all asics).
4096  * Returns 0 for success or an error on failure.
4097  * Called at driver suspend.
4098  */
4099 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4100 {
4101 	struct amdgpu_device *adev = drm_to_adev(dev);
4102 	int r = 0;
4103 
4104 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4105 		return 0;
4106 
4107 	adev->in_suspend = true;
4108 
4109 	if (amdgpu_sriov_vf(adev)) {
4110 		amdgpu_virt_fini_data_exchange(adev);
4111 		r = amdgpu_virt_request_full_gpu(adev, false);
4112 		if (r)
4113 			return r;
4114 	}
4115 
4116 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4117 		DRM_WARN("smart shift update failed\n");
4118 
4119 	drm_kms_helper_poll_disable(dev);
4120 
4121 	if (fbcon)
4122 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4123 
4124 	cancel_delayed_work_sync(&adev->delayed_init_work);
4125 
4126 	amdgpu_ras_suspend(adev);
4127 
4128 	amdgpu_device_ip_suspend_phase1(adev);
4129 
4130 	if (!adev->in_s0ix)
4131 		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4132 
4133 	r = amdgpu_device_evict_resources(adev);
4134 	if (r)
4135 		return r;
4136 
4137 	amdgpu_fence_driver_hw_fini(adev);
4138 
4139 	amdgpu_device_ip_suspend_phase2(adev);
4140 
4141 	if (amdgpu_sriov_vf(adev))
4142 		amdgpu_virt_release_full_gpu(adev, false);
4143 
4144 	return 0;
4145 }
4146 
4147 /**
4148  * amdgpu_device_resume - initiate device resume
4149  *
4150  * @dev: drm dev pointer
4151  * @fbcon : notify the fbdev of resume
4152  *
4153  * Bring the hw back to operating state (all asics).
4154  * Returns 0 for success or an error on failure.
4155  * Called at driver resume.
4156  */
4157 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4158 {
4159 	struct amdgpu_device *adev = drm_to_adev(dev);
4160 	int r = 0;
4161 
4162 	if (amdgpu_sriov_vf(adev)) {
4163 		r = amdgpu_virt_request_full_gpu(adev, true);
4164 		if (r)
4165 			return r;
4166 	}
4167 
4168 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4169 		return 0;
4170 
4171 	if (adev->in_s0ix)
4172 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4173 
4174 	/* post card */
4175 	if (amdgpu_device_need_post(adev)) {
4176 		r = amdgpu_device_asic_init(adev);
4177 		if (r)
4178 			dev_err(adev->dev, "amdgpu asic init failed\n");
4179 	}
4180 
4181 	r = amdgpu_device_ip_resume(adev);
4182 
4183 	/* no matter what r is, always need to properly release full GPU */
4184 	if (amdgpu_sriov_vf(adev)) {
4185 		amdgpu_virt_init_data_exchange(adev);
4186 		amdgpu_virt_release_full_gpu(adev, true);
4187 	}
4188 
4189 	if (r) {
4190 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4191 		return r;
4192 	}
4193 	amdgpu_fence_driver_hw_init(adev);
4194 
4195 	r = amdgpu_device_ip_late_init(adev);
4196 	if (r)
4197 		return r;
4198 
4199 	queue_delayed_work(system_wq, &adev->delayed_init_work,
4200 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
4201 
4202 	if (!adev->in_s0ix) {
4203 		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4204 		if (r)
4205 			return r;
4206 	}
4207 
4208 	/* Make sure IB tests flushed */
4209 	if (amdgpu_sriov_vf(adev))
4210 		amdgpu_irq_gpu_reset_resume_helper(adev);
4211 	flush_delayed_work(&adev->delayed_init_work);
4212 
4213 	if (adev->in_s0ix) {
4214 		/* re-enable gfxoff after IP resume. This re-enables gfxoff after
4215 		 * it was disabled for IP resume in amdgpu_device_ip_resume_phase2().
4216 		 */
4217 		amdgpu_gfx_off_ctrl(adev, true);
4218 		DRM_DEBUG("will enable gfxoff for the mission mode\n");
4219 	}
4220 	if (fbcon)
4221 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4222 
4223 	drm_kms_helper_poll_enable(dev);
4224 
4225 	amdgpu_ras_resume(adev);
4226 
4227 	if (adev->mode_info.num_crtc) {
4228 		/*
4229 		 * Most of the connector probing functions try to acquire runtime pm
4230 		 * refs to ensure that the GPU is powered on when connector polling is
4231 		 * performed. Since we're calling this from a runtime PM callback,
4232 		 * trying to acquire rpm refs will cause us to deadlock.
4233 		 *
4234 		 * Since we're guaranteed to be holding the rpm lock, it's safe to
4235 		 * temporarily disable the rpm helpers so this doesn't deadlock us.
4236 		 */
4237 #ifdef CONFIG_PM
4238 		dev->dev->power.disable_depth++;
4239 #endif
4240 		if (!adev->dc_enabled)
4241 			drm_helper_hpd_irq_event(dev);
4242 		else
4243 			drm_kms_helper_hotplug_event(dev);
4244 #ifdef CONFIG_PM
4245 		dev->dev->power.disable_depth--;
4246 #endif
4247 	}
4248 	adev->in_suspend = false;
4249 
4250 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4251 		DRM_WARN("smart shift update failed\n");
4252 
4253 	return 0;
4254 }
4255 
4256 /**
4257  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4258  *
4259  * @adev: amdgpu_device pointer
4260  *
4261  * The list of all the hardware IPs that make up the asic is walked and
4262  * the check_soft_reset callbacks are run.  check_soft_reset determines
4263  * if the asic is still hung or not.
4264  * Returns true if any of the IPs are still in a hung state, false if not.
4265  */
4266 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4267 {
4268 	int i;
4269 	bool asic_hang = false;
4270 
4271 	if (amdgpu_sriov_vf(adev))
4272 		return true;
4273 
4274 	if (amdgpu_asic_need_full_reset(adev))
4275 		return true;
4276 
4277 	for (i = 0; i < adev->num_ip_blocks; i++) {
4278 		if (!adev->ip_blocks[i].status.valid)
4279 			continue;
4280 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4281 			adev->ip_blocks[i].status.hang =
4282 				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4283 		if (adev->ip_blocks[i].status.hang) {
4284 			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4285 			asic_hang = true;
4286 		}
4287 	}
4288 	return asic_hang;
4289 }
4290 
4291 /**
4292  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4293  *
4294  * @adev: amdgpu_device pointer
4295  *
4296  * The list of all the hardware IPs that make up the asic is walked and the
4297  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4298  * handles any IP specific hardware or software state changes that are
4299  * necessary for a soft reset to succeed.
4300  * Returns 0 on success, negative error code on failure.
4301  */
4302 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4303 {
4304 	int i, r = 0;
4305 
4306 	for (i = 0; i < adev->num_ip_blocks; i++) {
4307 		if (!adev->ip_blocks[i].status.valid)
4308 			continue;
4309 		if (adev->ip_blocks[i].status.hang &&
4310 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4311 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4312 			if (r)
4313 				return r;
4314 		}
4315 	}
4316 
4317 	return 0;
4318 }
4319 
4320 /**
4321  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4322  *
4323  * @adev: amdgpu_device pointer
4324  *
4325  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4326  * reset is necessary to recover.
4327  * Returns true if a full asic reset is required, false if not.
4328  */
4329 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4330 {
4331 	int i;
4332 
4333 	if (amdgpu_asic_need_full_reset(adev))
4334 		return true;
4335 
4336 	for (i = 0; i < adev->num_ip_blocks; i++) {
4337 		if (!adev->ip_blocks[i].status.valid)
4338 			continue;
4339 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4340 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4341 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4342 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4343 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4344 			if (adev->ip_blocks[i].status.hang) {
4345 				dev_info(adev->dev, "Some block need full reset!\n");
4346 				return true;
4347 			}
4348 		}
4349 	}
4350 	return false;
4351 }
4352 
4353 /**
4354  * amdgpu_device_ip_soft_reset - do a soft reset
4355  *
4356  * @adev: amdgpu_device pointer
4357  *
4358  * The list of all the hardware IPs that make up the asic is walked and the
4359  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4360  * IP specific hardware or software state changes that are necessary to soft
4361  * reset the IP.
4362  * Returns 0 on success, negative error code on failure.
4363  */
4364 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4365 {
4366 	int i, r = 0;
4367 
4368 	for (i = 0; i < adev->num_ip_blocks; i++) {
4369 		if (!adev->ip_blocks[i].status.valid)
4370 			continue;
4371 		if (adev->ip_blocks[i].status.hang &&
4372 		    adev->ip_blocks[i].version->funcs->soft_reset) {
4373 			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4374 			if (r)
4375 				return r;
4376 		}
4377 	}
4378 
4379 	return 0;
4380 }
4381 
4382 /**
4383  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4384  *
4385  * @adev: amdgpu_device pointer
4386  *
4387  * The list of all the hardware IPs that make up the asic is walked and the
4388  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4389  * handles any IP specific hardware or software state changes that are
4390  * necessary after the IP has been soft reset.
4391  * Returns 0 on success, negative error code on failure.
4392  */
4393 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4394 {
4395 	int i, r = 0;
4396 
4397 	for (i = 0; i < adev->num_ip_blocks; i++) {
4398 		if (!adev->ip_blocks[i].status.valid)
4399 			continue;
4400 		if (adev->ip_blocks[i].status.hang &&
4401 		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4402 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4403 		if (r)
4404 			return r;
4405 	}
4406 
4407 	return 0;
4408 }
4409 
4410 /**
4411  * amdgpu_device_recover_vram - Recover some VRAM contents
4412  *
4413  * @adev: amdgpu_device pointer
4414  *
4415  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4416  * restore things like GPUVM page tables after a GPU reset where
4417  * the contents of VRAM might be lost.
4418  *
4419  * Returns:
4420  * 0 on success, negative error code on failure.
4421  */
4422 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4423 {
4424 	struct dma_fence *fence = NULL, *next = NULL;
4425 	struct amdgpu_bo *shadow;
4426 	struct amdgpu_bo_vm *vmbo;
4427 	long r = 1, tmo;
4428 
4429 	if (amdgpu_sriov_runtime(adev))
4430 		tmo = msecs_to_jiffies(8000);
4431 	else
4432 		tmo = msecs_to_jiffies(100);
4433 
4434 	dev_info(adev->dev, "recover vram bo from shadow start\n");
4435 	mutex_lock(&adev->shadow_list_lock);
4436 	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4437 		shadow = &vmbo->bo;
4438 		/* No need to recover an evicted BO */
4439 		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4440 		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4441 		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4442 			continue;
4443 
4444 		r = amdgpu_bo_restore_shadow(shadow, &next);
4445 		if (r)
4446 			break;
4447 
4448 		if (fence) {
4449 			tmo = dma_fence_wait_timeout(fence, false, tmo);
4450 			dma_fence_put(fence);
4451 			fence = next;
4452 			if (tmo == 0) {
4453 				r = -ETIMEDOUT;
4454 				break;
4455 			} else if (tmo < 0) {
4456 				r = tmo;
4457 				break;
4458 			}
4459 		} else {
4460 			fence = next;
4461 		}
4462 	}
4463 	mutex_unlock(&adev->shadow_list_lock);
4464 
4465 	if (fence)
4466 		tmo = dma_fence_wait_timeout(fence, false, tmo);
4467 	dma_fence_put(fence);
4468 
4469 	if (r < 0 || tmo <= 0) {
4470 		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4471 		return -EIO;
4472 	}
4473 
4474 	dev_info(adev->dev, "recover vram bo from shadow done\n");
4475 	return 0;
4476 }
4477 
4478 
4479 /**
4480  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4481  *
4482  * @adev: amdgpu_device pointer
4483  * @from_hypervisor: request from hypervisor
4484  *
4485  * do VF FLR and reinitialize Asic
4486  * return 0 means succeeded otherwise failed
4487  */
4488 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4489 				     bool from_hypervisor)
4490 {
4491 	int r;
4492 	struct amdgpu_hive_info *hive = NULL;
4493 	int retry_limit = 0;
4494 
4495 retry:
4496 	amdgpu_amdkfd_pre_reset(adev);
4497 
4498 	if (from_hypervisor)
4499 		r = amdgpu_virt_request_full_gpu(adev, true);
4500 	else
4501 		r = amdgpu_virt_reset_gpu(adev);
4502 	if (r)
4503 		return r;
4504 
4505 	/* Resume IP prior to SMC */
4506 	r = amdgpu_device_ip_reinit_early_sriov(adev);
4507 	if (r)
4508 		goto error;
4509 
4510 	amdgpu_virt_init_data_exchange(adev);
4511 
4512 	r = amdgpu_device_fw_loading(adev);
4513 	if (r)
4514 		return r;
4515 
4516 	/* now we are okay to resume SMC/CP/SDMA */
4517 	r = amdgpu_device_ip_reinit_late_sriov(adev);
4518 	if (r)
4519 		goto error;
4520 
4521 	hive = amdgpu_get_xgmi_hive(adev);
4522 	/* Update PSP FW topology after reset */
4523 	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4524 		r = amdgpu_xgmi_update_topology(hive, adev);
4525 
4526 	if (hive)
4527 		amdgpu_put_xgmi_hive(hive);
4528 
4529 	if (!r) {
4530 		amdgpu_irq_gpu_reset_resume_helper(adev);
4531 		r = amdgpu_ib_ring_tests(adev);
4532 
4533 		amdgpu_amdkfd_post_reset(adev);
4534 	}
4535 
4536 error:
4537 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4538 		amdgpu_inc_vram_lost(adev);
4539 		r = amdgpu_device_recover_vram(adev);
4540 	}
4541 	amdgpu_virt_release_full_gpu(adev, true);
4542 
4543 	if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4544 		if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4545 			retry_limit++;
4546 			goto retry;
4547 		} else
4548 			DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4549 	}
4550 
4551 	return r;
4552 }
4553 
4554 /**
4555  * amdgpu_device_has_job_running - check if there is any job in mirror list
4556  *
4557  * @adev: amdgpu_device pointer
4558  *
4559  * check if there is any job in mirror list
4560  */
4561 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4562 {
4563 	int i;
4564 	struct drm_sched_job *job;
4565 
4566 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4567 		struct amdgpu_ring *ring = adev->rings[i];
4568 
4569 		if (!ring || !ring->sched.thread)
4570 			continue;
4571 
4572 		spin_lock(&ring->sched.job_list_lock);
4573 		job = list_first_entry_or_null(&ring->sched.pending_list,
4574 					       struct drm_sched_job, list);
4575 		spin_unlock(&ring->sched.job_list_lock);
4576 		if (job)
4577 			return true;
4578 	}
4579 	return false;
4580 }
4581 
4582 /**
4583  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4584  *
4585  * @adev: amdgpu_device pointer
4586  *
4587  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4588  * a hung GPU.
4589  */
4590 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4591 {
4592 
4593 	if (amdgpu_gpu_recovery == 0)
4594 		goto disabled;
4595 
4596 	/* Skip soft reset check in fatal error mode */
4597 	if (!amdgpu_ras_is_poison_mode_supported(adev))
4598 		return true;
4599 
4600 	if (!amdgpu_device_ip_check_soft_reset(adev)) {
4601 		dev_info(adev->dev,"Timeout, but no hardware hang detected.\n");
4602 		return false;
4603 	}
4604 
4605 	if (amdgpu_sriov_vf(adev))
4606 		return true;
4607 
4608 	if (amdgpu_gpu_recovery == -1) {
4609 		switch (adev->asic_type) {
4610 #ifdef CONFIG_DRM_AMDGPU_SI
4611 		case CHIP_VERDE:
4612 		case CHIP_TAHITI:
4613 		case CHIP_PITCAIRN:
4614 		case CHIP_OLAND:
4615 		case CHIP_HAINAN:
4616 #endif
4617 #ifdef CONFIG_DRM_AMDGPU_CIK
4618 		case CHIP_KAVERI:
4619 		case CHIP_KABINI:
4620 		case CHIP_MULLINS:
4621 #endif
4622 		case CHIP_CARRIZO:
4623 		case CHIP_STONEY:
4624 		case CHIP_CYAN_SKILLFISH:
4625 			goto disabled;
4626 		default:
4627 			break;
4628 		}
4629 	}
4630 
4631 	return true;
4632 
4633 disabled:
4634 		dev_info(adev->dev, "GPU recovery disabled.\n");
4635 		return false;
4636 }
4637 
4638 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4639 {
4640         u32 i;
4641         int ret = 0;
4642 
4643         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4644 
4645         dev_info(adev->dev, "GPU mode1 reset\n");
4646 
4647         /* disable BM */
4648         pci_clear_master(adev->pdev);
4649 
4650         amdgpu_device_cache_pci_state(adev->pdev);
4651 
4652         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4653                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4654                 ret = amdgpu_dpm_mode1_reset(adev);
4655         } else {
4656                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4657                 ret = psp_gpu_reset(adev);
4658         }
4659 
4660         if (ret)
4661                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4662 
4663         amdgpu_device_load_pci_state(adev->pdev);
4664 
4665         /* wait for asic to come out of reset */
4666         for (i = 0; i < adev->usec_timeout; i++) {
4667                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4668 
4669                 if (memsize != 0xffffffff)
4670                         break;
4671                 udelay(1);
4672         }
4673 
4674         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4675         return ret;
4676 }
4677 
4678 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4679 				 struct amdgpu_reset_context *reset_context)
4680 {
4681 	int i, r = 0;
4682 	struct amdgpu_job *job = NULL;
4683 	bool need_full_reset =
4684 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4685 
4686 	if (reset_context->reset_req_dev == adev)
4687 		job = reset_context->job;
4688 
4689 	if (amdgpu_sriov_vf(adev)) {
4690 		/* stop the data exchange thread */
4691 		amdgpu_virt_fini_data_exchange(adev);
4692 	}
4693 
4694 	amdgpu_fence_driver_isr_toggle(adev, true);
4695 
4696 	/* block all schedulers and reset given job's ring */
4697 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4698 		struct amdgpu_ring *ring = adev->rings[i];
4699 
4700 		if (!ring || !ring->sched.thread)
4701 			continue;
4702 
4703 		/*clear job fence from fence drv to avoid force_completion
4704 		 *leave NULL and vm flush fence in fence drv */
4705 		amdgpu_fence_driver_clear_job_fences(ring);
4706 
4707 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4708 		amdgpu_fence_driver_force_completion(ring);
4709 	}
4710 
4711 	amdgpu_fence_driver_isr_toggle(adev, false);
4712 
4713 	if (job && job->vm)
4714 		drm_sched_increase_karma(&job->base);
4715 
4716 	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4717 	/* If reset handler not implemented, continue; otherwise return */
4718 	if (r == -ENOSYS)
4719 		r = 0;
4720 	else
4721 		return r;
4722 
4723 	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4724 	if (!amdgpu_sriov_vf(adev)) {
4725 
4726 		if (!need_full_reset)
4727 			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4728 
4729 		if (!need_full_reset && amdgpu_gpu_recovery) {
4730 			amdgpu_device_ip_pre_soft_reset(adev);
4731 			r = amdgpu_device_ip_soft_reset(adev);
4732 			amdgpu_device_ip_post_soft_reset(adev);
4733 			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4734 				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4735 				need_full_reset = true;
4736 			}
4737 		}
4738 
4739 		if (need_full_reset)
4740 			r = amdgpu_device_ip_suspend(adev);
4741 		if (need_full_reset)
4742 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4743 		else
4744 			clear_bit(AMDGPU_NEED_FULL_RESET,
4745 				  &reset_context->flags);
4746 	}
4747 
4748 	return r;
4749 }
4750 
4751 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4752 {
4753 	int i;
4754 
4755 	lockdep_assert_held(&adev->reset_domain->sem);
4756 
4757 	for (i = 0; i < adev->num_regs; i++) {
4758 		adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4759 		trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4760 					     adev->reset_dump_reg_value[i]);
4761 	}
4762 
4763 	return 0;
4764 }
4765 
4766 #ifdef CONFIG_DEV_COREDUMP
4767 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4768 		size_t count, void *data, size_t datalen)
4769 {
4770 	struct drm_printer p;
4771 	struct amdgpu_device *adev = data;
4772 	struct drm_print_iterator iter;
4773 	int i;
4774 
4775 	iter.data = buffer;
4776 	iter.offset = 0;
4777 	iter.start = offset;
4778 	iter.remain = count;
4779 
4780 	p = drm_coredump_printer(&iter);
4781 
4782 	drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4783 	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4784 	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4785 	drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4786 	if (adev->reset_task_info.pid)
4787 		drm_printf(&p, "process_name: %s PID: %d\n",
4788 			   adev->reset_task_info.process_name,
4789 			   adev->reset_task_info.pid);
4790 
4791 	if (adev->reset_vram_lost)
4792 		drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4793 	if (adev->num_regs) {
4794 		drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
4795 
4796 		for (i = 0; i < adev->num_regs; i++)
4797 			drm_printf(&p, "0x%08x: 0x%08x\n",
4798 				   adev->reset_dump_reg_list[i],
4799 				   adev->reset_dump_reg_value[i]);
4800 	}
4801 
4802 	return count - iter.remain;
4803 }
4804 
4805 static void amdgpu_devcoredump_free(void *data)
4806 {
4807 }
4808 
4809 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4810 {
4811 	struct drm_device *dev = adev_to_drm(adev);
4812 
4813 	ktime_get_ts64(&adev->reset_time);
4814 	dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4815 		      amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4816 }
4817 #endif
4818 
4819 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4820 			 struct amdgpu_reset_context *reset_context)
4821 {
4822 	struct amdgpu_device *tmp_adev = NULL;
4823 	bool need_full_reset, skip_hw_reset, vram_lost = false;
4824 	int r = 0;
4825 	bool gpu_reset_for_dev_remove = 0;
4826 
4827 	/* Try reset handler method first */
4828 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4829 				    reset_list);
4830 	amdgpu_reset_reg_dumps(tmp_adev);
4831 
4832 	reset_context->reset_device_list = device_list_handle;
4833 	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4834 	/* If reset handler not implemented, continue; otherwise return */
4835 	if (r == -ENOSYS)
4836 		r = 0;
4837 	else
4838 		return r;
4839 
4840 	/* Reset handler not implemented, use the default method */
4841 	need_full_reset =
4842 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4843 	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4844 
4845 	gpu_reset_for_dev_remove =
4846 		test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
4847 			test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4848 
4849 	/*
4850 	 * ASIC reset has to be done on all XGMI hive nodes ASAP
4851 	 * to allow proper links negotiation in FW (within 1 sec)
4852 	 */
4853 	if (!skip_hw_reset && need_full_reset) {
4854 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4855 			/* For XGMI run all resets in parallel to speed up the process */
4856 			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4857 				tmp_adev->gmc.xgmi.pending_reset = false;
4858 				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4859 					r = -EALREADY;
4860 			} else
4861 				r = amdgpu_asic_reset(tmp_adev);
4862 
4863 			if (r) {
4864 				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4865 					 r, adev_to_drm(tmp_adev)->unique);
4866 				break;
4867 			}
4868 		}
4869 
4870 		/* For XGMI wait for all resets to complete before proceed */
4871 		if (!r) {
4872 			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4873 				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4874 					flush_work(&tmp_adev->xgmi_reset_work);
4875 					r = tmp_adev->asic_reset_res;
4876 					if (r)
4877 						break;
4878 				}
4879 			}
4880 		}
4881 	}
4882 
4883 	if (!r && amdgpu_ras_intr_triggered()) {
4884 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4885 			if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4886 			    tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4887 				tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4888 		}
4889 
4890 		amdgpu_ras_intr_cleared();
4891 	}
4892 
4893 	/* Since the mode1 reset affects base ip blocks, the
4894 	 * phase1 ip blocks need to be resumed. Otherwise there
4895 	 * will be a BIOS signature error and the psp bootloader
4896 	 * can't load kdb on the next amdgpu install.
4897 	 */
4898 	if (gpu_reset_for_dev_remove) {
4899 		list_for_each_entry(tmp_adev, device_list_handle, reset_list)
4900 			amdgpu_device_ip_resume_phase1(tmp_adev);
4901 
4902 		goto end;
4903 	}
4904 
4905 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4906 		if (need_full_reset) {
4907 			/* post card */
4908 			r = amdgpu_device_asic_init(tmp_adev);
4909 			if (r) {
4910 				dev_warn(tmp_adev->dev, "asic atom init failed!");
4911 			} else {
4912 				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4913 				r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4914 				if (r)
4915 					goto out;
4916 
4917 				r = amdgpu_device_ip_resume_phase1(tmp_adev);
4918 				if (r)
4919 					goto out;
4920 
4921 				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4922 #ifdef CONFIG_DEV_COREDUMP
4923 				tmp_adev->reset_vram_lost = vram_lost;
4924 				memset(&tmp_adev->reset_task_info, 0,
4925 						sizeof(tmp_adev->reset_task_info));
4926 				if (reset_context->job && reset_context->job->vm)
4927 					tmp_adev->reset_task_info =
4928 						reset_context->job->vm->task_info;
4929 				amdgpu_reset_capture_coredumpm(tmp_adev);
4930 #endif
4931 				if (vram_lost) {
4932 					DRM_INFO("VRAM is lost due to GPU reset!\n");
4933 					amdgpu_inc_vram_lost(tmp_adev);
4934 				}
4935 
4936 				r = amdgpu_device_fw_loading(tmp_adev);
4937 				if (r)
4938 					return r;
4939 
4940 				r = amdgpu_device_ip_resume_phase2(tmp_adev);
4941 				if (r)
4942 					goto out;
4943 
4944 				if (vram_lost)
4945 					amdgpu_device_fill_reset_magic(tmp_adev);
4946 
4947 				/*
4948 				 * Add this ASIC as tracked as reset was already
4949 				 * complete successfully.
4950 				 */
4951 				amdgpu_register_gpu_instance(tmp_adev);
4952 
4953 				if (!reset_context->hive &&
4954 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4955 					amdgpu_xgmi_add_device(tmp_adev);
4956 
4957 				r = amdgpu_device_ip_late_init(tmp_adev);
4958 				if (r)
4959 					goto out;
4960 
4961 				drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4962 
4963 				/*
4964 				 * The GPU enters bad state once faulty pages
4965 				 * by ECC has reached the threshold, and ras
4966 				 * recovery is scheduled next. So add one check
4967 				 * here to break recovery if it indeed exceeds
4968 				 * bad page threshold, and remind user to
4969 				 * retire this GPU or setting one bigger
4970 				 * bad_page_threshold value to fix this once
4971 				 * probing driver again.
4972 				 */
4973 				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4974 					/* must succeed. */
4975 					amdgpu_ras_resume(tmp_adev);
4976 				} else {
4977 					r = -EINVAL;
4978 					goto out;
4979 				}
4980 
4981 				/* Update PSP FW topology after reset */
4982 				if (reset_context->hive &&
4983 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4984 					r = amdgpu_xgmi_update_topology(
4985 						reset_context->hive, tmp_adev);
4986 			}
4987 		}
4988 
4989 out:
4990 		if (!r) {
4991 			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4992 			r = amdgpu_ib_ring_tests(tmp_adev);
4993 			if (r) {
4994 				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4995 				need_full_reset = true;
4996 				r = -EAGAIN;
4997 				goto end;
4998 			}
4999 		}
5000 
5001 		if (!r)
5002 			r = amdgpu_device_recover_vram(tmp_adev);
5003 		else
5004 			tmp_adev->asic_reset_res = r;
5005 	}
5006 
5007 end:
5008 	if (need_full_reset)
5009 		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5010 	else
5011 		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5012 	return r;
5013 }
5014 
5015 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5016 {
5017 
5018 	switch (amdgpu_asic_reset_method(adev)) {
5019 	case AMD_RESET_METHOD_MODE1:
5020 		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5021 		break;
5022 	case AMD_RESET_METHOD_MODE2:
5023 		adev->mp1_state = PP_MP1_STATE_RESET;
5024 		break;
5025 	default:
5026 		adev->mp1_state = PP_MP1_STATE_NONE;
5027 		break;
5028 	}
5029 }
5030 
5031 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5032 {
5033 	amdgpu_vf_error_trans_all(adev);
5034 	adev->mp1_state = PP_MP1_STATE_NONE;
5035 }
5036 
5037 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5038 {
5039 	struct pci_dev *p = NULL;
5040 
5041 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5042 			adev->pdev->bus->number, 1);
5043 	if (p) {
5044 		pm_runtime_enable(&(p->dev));
5045 		pm_runtime_resume(&(p->dev));
5046 	}
5047 }
5048 
5049 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5050 {
5051 	enum amd_reset_method reset_method;
5052 	struct pci_dev *p = NULL;
5053 	u64 expires;
5054 
5055 	/*
5056 	 * For now, only BACO and mode1 reset are confirmed
5057 	 * to suffer the audio issue without proper suspended.
5058 	 */
5059 	reset_method = amdgpu_asic_reset_method(adev);
5060 	if ((reset_method != AMD_RESET_METHOD_BACO) &&
5061 	     (reset_method != AMD_RESET_METHOD_MODE1))
5062 		return -EINVAL;
5063 
5064 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5065 			adev->pdev->bus->number, 1);
5066 	if (!p)
5067 		return -ENODEV;
5068 
5069 	expires = pm_runtime_autosuspend_expiration(&(p->dev));
5070 	if (!expires)
5071 		/*
5072 		 * If we cannot get the audio device autosuspend delay,
5073 		 * a fixed 4S interval will be used. Considering 3S is
5074 		 * the audio controller default autosuspend delay setting.
5075 		 * 4S used here is guaranteed to cover that.
5076 		 */
5077 		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5078 
5079 	while (!pm_runtime_status_suspended(&(p->dev))) {
5080 		if (!pm_runtime_suspend(&(p->dev)))
5081 			break;
5082 
5083 		if (expires < ktime_get_mono_fast_ns()) {
5084 			dev_warn(adev->dev, "failed to suspend display audio\n");
5085 			/* TODO: abort the succeeding gpu reset? */
5086 			return -ETIMEDOUT;
5087 		}
5088 	}
5089 
5090 	pm_runtime_disable(&(p->dev));
5091 
5092 	return 0;
5093 }
5094 
5095 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5096 {
5097 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5098 
5099 #if defined(CONFIG_DEBUG_FS)
5100 	if (!amdgpu_sriov_vf(adev))
5101 		cancel_work(&adev->reset_work);
5102 #endif
5103 
5104 	if (adev->kfd.dev)
5105 		cancel_work(&adev->kfd.reset_work);
5106 
5107 	if (amdgpu_sriov_vf(adev))
5108 		cancel_work(&adev->virt.flr_work);
5109 
5110 	if (con && adev->ras_enabled)
5111 		cancel_work(&con->recovery_work);
5112 
5113 }
5114 
5115 /**
5116  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5117  *
5118  * @adev: amdgpu_device pointer
5119  * @job: which job trigger hang
5120  *
5121  * Attempt to reset the GPU if it has hung (all asics).
5122  * Attempt to do soft-reset or full-reset and reinitialize Asic
5123  * Returns 0 for success or an error on failure.
5124  */
5125 
5126 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5127 			      struct amdgpu_job *job,
5128 			      struct amdgpu_reset_context *reset_context)
5129 {
5130 	struct list_head device_list, *device_list_handle =  NULL;
5131 	bool job_signaled = false;
5132 	struct amdgpu_hive_info *hive = NULL;
5133 	struct amdgpu_device *tmp_adev = NULL;
5134 	int i, r = 0;
5135 	bool need_emergency_restart = false;
5136 	bool audio_suspended = false;
5137 	bool gpu_reset_for_dev_remove = false;
5138 
5139 	gpu_reset_for_dev_remove =
5140 			test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5141 				test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5142 
5143 	/*
5144 	 * Special case: RAS triggered and full reset isn't supported
5145 	 */
5146 	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5147 
5148 	/*
5149 	 * Flush RAM to disk so that after reboot
5150 	 * the user can read log and see why the system rebooted.
5151 	 */
5152 	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5153 		DRM_WARN("Emergency reboot.");
5154 
5155 		ksys_sync_helper();
5156 		emergency_restart();
5157 	}
5158 
5159 	dev_info(adev->dev, "GPU %s begin!\n",
5160 		need_emergency_restart ? "jobs stop":"reset");
5161 
5162 	if (!amdgpu_sriov_vf(adev))
5163 		hive = amdgpu_get_xgmi_hive(adev);
5164 	if (hive)
5165 		mutex_lock(&hive->hive_lock);
5166 
5167 	reset_context->job = job;
5168 	reset_context->hive = hive;
5169 	/*
5170 	 * Build list of devices to reset.
5171 	 * In case we are in XGMI hive mode, resort the device list
5172 	 * to put adev in the 1st position.
5173 	 */
5174 	INIT_LIST_HEAD(&device_list);
5175 	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5176 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5177 			list_add_tail(&tmp_adev->reset_list, &device_list);
5178 			if (gpu_reset_for_dev_remove && adev->shutdown)
5179 				tmp_adev->shutdown = true;
5180 		}
5181 		if (!list_is_first(&adev->reset_list, &device_list))
5182 			list_rotate_to_front(&adev->reset_list, &device_list);
5183 		device_list_handle = &device_list;
5184 	} else {
5185 		list_add_tail(&adev->reset_list, &device_list);
5186 		device_list_handle = &device_list;
5187 	}
5188 
5189 	/* We need to lock reset domain only once both for XGMI and single device */
5190 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5191 				    reset_list);
5192 	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5193 
5194 	/* block all schedulers and reset given job's ring */
5195 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5196 
5197 		amdgpu_device_set_mp1_state(tmp_adev);
5198 
5199 		/*
5200 		 * Try to put the audio codec into suspend state
5201 		 * before gpu reset started.
5202 		 *
5203 		 * Due to the power domain of the graphics device
5204 		 * is shared with AZ power domain. Without this,
5205 		 * we may change the audio hardware from behind
5206 		 * the audio driver's back. That will trigger
5207 		 * some audio codec errors.
5208 		 */
5209 		if (!amdgpu_device_suspend_display_audio(tmp_adev))
5210 			audio_suspended = true;
5211 
5212 		amdgpu_ras_set_error_query_ready(tmp_adev, false);
5213 
5214 		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5215 
5216 		if (!amdgpu_sriov_vf(tmp_adev))
5217 			amdgpu_amdkfd_pre_reset(tmp_adev);
5218 
5219 		/*
5220 		 * Mark these ASICs to be reseted as untracked first
5221 		 * And add them back after reset completed
5222 		 */
5223 		amdgpu_unregister_gpu_instance(tmp_adev);
5224 
5225 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5226 
5227 		/* disable ras on ALL IPs */
5228 		if (!need_emergency_restart &&
5229 		      amdgpu_device_ip_need_full_reset(tmp_adev))
5230 			amdgpu_ras_suspend(tmp_adev);
5231 
5232 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5233 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5234 
5235 			if (!ring || !ring->sched.thread)
5236 				continue;
5237 
5238 			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5239 
5240 			if (need_emergency_restart)
5241 				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5242 		}
5243 		atomic_inc(&tmp_adev->gpu_reset_counter);
5244 	}
5245 
5246 	if (need_emergency_restart)
5247 		goto skip_sched_resume;
5248 
5249 	/*
5250 	 * Must check guilty signal here since after this point all old
5251 	 * HW fences are force signaled.
5252 	 *
5253 	 * job->base holds a reference to parent fence
5254 	 */
5255 	if (job && dma_fence_is_signaled(&job->hw_fence)) {
5256 		job_signaled = true;
5257 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5258 		goto skip_hw_reset;
5259 	}
5260 
5261 retry:	/* Rest of adevs pre asic reset from XGMI hive. */
5262 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5263 		if (gpu_reset_for_dev_remove) {
5264 			/* Workaroud for ASICs need to disable SMC first */
5265 			amdgpu_device_smu_fini_early(tmp_adev);
5266 		}
5267 		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5268 		/*TODO Should we stop ?*/
5269 		if (r) {
5270 			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5271 				  r, adev_to_drm(tmp_adev)->unique);
5272 			tmp_adev->asic_reset_res = r;
5273 		}
5274 
5275 		/*
5276 		 * Drop all pending non scheduler resets. Scheduler resets
5277 		 * were already dropped during drm_sched_stop
5278 		 */
5279 		amdgpu_device_stop_pending_resets(tmp_adev);
5280 	}
5281 
5282 	/* Actual ASIC resets if needed.*/
5283 	/* Host driver will handle XGMI hive reset for SRIOV */
5284 	if (amdgpu_sriov_vf(adev)) {
5285 		r = amdgpu_device_reset_sriov(adev, job ? false : true);
5286 		if (r)
5287 			adev->asic_reset_res = r;
5288 
5289 		/* Aldebaran supports ras in SRIOV, so need resume ras during reset */
5290 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5291 			amdgpu_ras_resume(adev);
5292 	} else {
5293 		r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5294 		if (r && r == -EAGAIN)
5295 			goto retry;
5296 
5297 		if (!r && gpu_reset_for_dev_remove)
5298 			goto recover_end;
5299 	}
5300 
5301 skip_hw_reset:
5302 
5303 	/* Post ASIC reset for all devs .*/
5304 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5305 
5306 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5307 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5308 
5309 			if (!ring || !ring->sched.thread)
5310 				continue;
5311 
5312 			drm_sched_start(&ring->sched, true);
5313 		}
5314 
5315 		if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
5316 			amdgpu_mes_self_test(tmp_adev);
5317 
5318 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5319 			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5320 		}
5321 
5322 		if (tmp_adev->asic_reset_res)
5323 			r = tmp_adev->asic_reset_res;
5324 
5325 		tmp_adev->asic_reset_res = 0;
5326 
5327 		if (r) {
5328 			/* bad news, how to tell it to userspace ? */
5329 			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5330 			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5331 		} else {
5332 			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5333 			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5334 				DRM_WARN("smart shift update failed\n");
5335 		}
5336 	}
5337 
5338 skip_sched_resume:
5339 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5340 		/* unlock kfd: SRIOV would do it separately */
5341 		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5342 			amdgpu_amdkfd_post_reset(tmp_adev);
5343 
5344 		/* kfd_post_reset will do nothing if kfd device is not initialized,
5345 		 * need to bring up kfd here if it's not be initialized before
5346 		 */
5347 		if (!adev->kfd.init_complete)
5348 			amdgpu_amdkfd_device_init(adev);
5349 
5350 		if (audio_suspended)
5351 			amdgpu_device_resume_display_audio(tmp_adev);
5352 
5353 		amdgpu_device_unset_mp1_state(tmp_adev);
5354 
5355 		amdgpu_ras_set_error_query_ready(tmp_adev, true);
5356 	}
5357 
5358 recover_end:
5359 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5360 					    reset_list);
5361 	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5362 
5363 	if (hive) {
5364 		mutex_unlock(&hive->hive_lock);
5365 		amdgpu_put_xgmi_hive(hive);
5366 	}
5367 
5368 	if (r)
5369 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5370 
5371 	atomic_set(&adev->reset_domain->reset_res, r);
5372 	return r;
5373 }
5374 
5375 /**
5376  * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
5377  *
5378  * @adev: amdgpu_device pointer
5379  *
5380  * Fetchs and stores in the driver the PCIE capabilities (gen speed
5381  * and lanes) of the slot the device is in. Handles APUs and
5382  * virtualized environments where PCIE config space may not be available.
5383  */
5384 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5385 {
5386 	struct pci_dev *pdev;
5387 	enum pci_bus_speed speed_cap, platform_speed_cap;
5388 	enum pcie_link_width platform_link_width;
5389 
5390 	if (amdgpu_pcie_gen_cap)
5391 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5392 
5393 	if (amdgpu_pcie_lane_cap)
5394 		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5395 
5396 	/* covers APUs as well */
5397 	if (pci_is_root_bus(adev->pdev->bus)) {
5398 		if (adev->pm.pcie_gen_mask == 0)
5399 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5400 		if (adev->pm.pcie_mlw_mask == 0)
5401 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5402 		return;
5403 	}
5404 
5405 	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5406 		return;
5407 
5408 	pcie_bandwidth_available(adev->pdev, NULL,
5409 				 &platform_speed_cap, &platform_link_width);
5410 
5411 	if (adev->pm.pcie_gen_mask == 0) {
5412 		/* asic caps */
5413 		pdev = adev->pdev;
5414 		speed_cap = pcie_get_speed_cap(pdev);
5415 		if (speed_cap == PCI_SPEED_UNKNOWN) {
5416 			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5417 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5418 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5419 		} else {
5420 			if (speed_cap == PCIE_SPEED_32_0GT)
5421 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5422 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5423 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5424 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5425 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5426 			else if (speed_cap == PCIE_SPEED_16_0GT)
5427 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5428 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5429 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5430 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5431 			else if (speed_cap == PCIE_SPEED_8_0GT)
5432 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5433 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5434 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5435 			else if (speed_cap == PCIE_SPEED_5_0GT)
5436 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5437 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5438 			else
5439 				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5440 		}
5441 		/* platform caps */
5442 		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5443 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5444 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5445 		} else {
5446 			if (platform_speed_cap == PCIE_SPEED_32_0GT)
5447 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5448 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5449 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5450 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5451 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5452 			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5453 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5454 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5455 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5456 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5457 			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5458 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5459 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5460 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5461 			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5462 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5463 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5464 			else
5465 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5466 
5467 		}
5468 	}
5469 	if (adev->pm.pcie_mlw_mask == 0) {
5470 		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5471 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5472 		} else {
5473 			switch (platform_link_width) {
5474 			case PCIE_LNK_X32:
5475 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5476 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5477 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5478 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5479 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5480 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5481 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5482 				break;
5483 			case PCIE_LNK_X16:
5484 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5485 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5486 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5487 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5488 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5489 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5490 				break;
5491 			case PCIE_LNK_X12:
5492 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5493 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5494 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5495 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5496 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5497 				break;
5498 			case PCIE_LNK_X8:
5499 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5500 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5501 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5502 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5503 				break;
5504 			case PCIE_LNK_X4:
5505 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5506 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5507 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5508 				break;
5509 			case PCIE_LNK_X2:
5510 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5511 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5512 				break;
5513 			case PCIE_LNK_X1:
5514 				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5515 				break;
5516 			default:
5517 				break;
5518 			}
5519 		}
5520 	}
5521 }
5522 
5523 /**
5524  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5525  *
5526  * @adev: amdgpu_device pointer
5527  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5528  *
5529  * Return true if @peer_adev can access (DMA) @adev through the PCIe
5530  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5531  * @peer_adev.
5532  */
5533 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5534 				      struct amdgpu_device *peer_adev)
5535 {
5536 #ifdef CONFIG_HSA_AMD_P2P
5537 	uint64_t address_mask = peer_adev->dev->dma_mask ?
5538 		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5539 	resource_size_t aper_limit =
5540 		adev->gmc.aper_base + adev->gmc.aper_size - 1;
5541 	bool p2p_access =
5542 		!adev->gmc.xgmi.connected_to_cpu &&
5543 		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5544 
5545 	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5546 		adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5547 		!(adev->gmc.aper_base & address_mask ||
5548 		  aper_limit & address_mask));
5549 #else
5550 	return false;
5551 #endif
5552 }
5553 
5554 int amdgpu_device_baco_enter(struct drm_device *dev)
5555 {
5556 	struct amdgpu_device *adev = drm_to_adev(dev);
5557 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5558 
5559 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5560 		return -ENOTSUPP;
5561 
5562 	if (ras && adev->ras_enabled &&
5563 	    adev->nbio.funcs->enable_doorbell_interrupt)
5564 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5565 
5566 	return amdgpu_dpm_baco_enter(adev);
5567 }
5568 
5569 int amdgpu_device_baco_exit(struct drm_device *dev)
5570 {
5571 	struct amdgpu_device *adev = drm_to_adev(dev);
5572 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5573 	int ret = 0;
5574 
5575 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5576 		return -ENOTSUPP;
5577 
5578 	ret = amdgpu_dpm_baco_exit(adev);
5579 	if (ret)
5580 		return ret;
5581 
5582 	if (ras && adev->ras_enabled &&
5583 	    adev->nbio.funcs->enable_doorbell_interrupt)
5584 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5585 
5586 	if (amdgpu_passthrough(adev) &&
5587 	    adev->nbio.funcs->clear_doorbell_interrupt)
5588 		adev->nbio.funcs->clear_doorbell_interrupt(adev);
5589 
5590 	return 0;
5591 }
5592 
5593 /**
5594  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5595  * @pdev: PCI device struct
5596  * @state: PCI channel state
5597  *
5598  * Description: Called when a PCI error is detected.
5599  *
5600  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5601  */
5602 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5603 {
5604 	struct drm_device *dev = pci_get_drvdata(pdev);
5605 	struct amdgpu_device *adev = drm_to_adev(dev);
5606 	int i;
5607 
5608 	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5609 
5610 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
5611 		DRM_WARN("No support for XGMI hive yet...");
5612 		return PCI_ERS_RESULT_DISCONNECT;
5613 	}
5614 
5615 	adev->pci_channel_state = state;
5616 
5617 	switch (state) {
5618 	case pci_channel_io_normal:
5619 		return PCI_ERS_RESULT_CAN_RECOVER;
5620 	/* Fatal error, prepare for slot reset */
5621 	case pci_channel_io_frozen:
5622 		/*
5623 		 * Locking adev->reset_domain->sem will prevent any external access
5624 		 * to GPU during PCI error recovery
5625 		 */
5626 		amdgpu_device_lock_reset_domain(adev->reset_domain);
5627 		amdgpu_device_set_mp1_state(adev);
5628 
5629 		/*
5630 		 * Block any work scheduling as we do for regular GPU reset
5631 		 * for the duration of the recovery
5632 		 */
5633 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5634 			struct amdgpu_ring *ring = adev->rings[i];
5635 
5636 			if (!ring || !ring->sched.thread)
5637 				continue;
5638 
5639 			drm_sched_stop(&ring->sched, NULL);
5640 		}
5641 		atomic_inc(&adev->gpu_reset_counter);
5642 		return PCI_ERS_RESULT_NEED_RESET;
5643 	case pci_channel_io_perm_failure:
5644 		/* Permanent error, prepare for device removal */
5645 		return PCI_ERS_RESULT_DISCONNECT;
5646 	}
5647 
5648 	return PCI_ERS_RESULT_NEED_RESET;
5649 }
5650 
5651 /**
5652  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5653  * @pdev: pointer to PCI device
5654  */
5655 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5656 {
5657 
5658 	DRM_INFO("PCI error: mmio enabled callback!!\n");
5659 
5660 	/* TODO - dump whatever for debugging purposes */
5661 
5662 	/* This called only if amdgpu_pci_error_detected returns
5663 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5664 	 * works, no need to reset slot.
5665 	 */
5666 
5667 	return PCI_ERS_RESULT_RECOVERED;
5668 }
5669 
5670 /**
5671  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5672  * @pdev: PCI device struct
5673  *
5674  * Description: This routine is called by the pci error recovery
5675  * code after the PCI slot has been reset, just before we
5676  * should resume normal operations.
5677  */
5678 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5679 {
5680 	struct drm_device *dev = pci_get_drvdata(pdev);
5681 	struct amdgpu_device *adev = drm_to_adev(dev);
5682 	int r, i;
5683 	struct amdgpu_reset_context reset_context;
5684 	u32 memsize;
5685 	struct list_head device_list;
5686 
5687 	DRM_INFO("PCI error: slot reset callback!!\n");
5688 
5689 	memset(&reset_context, 0, sizeof(reset_context));
5690 
5691 	INIT_LIST_HEAD(&device_list);
5692 	list_add_tail(&adev->reset_list, &device_list);
5693 
5694 	/* wait for asic to come out of reset */
5695 	msleep(500);
5696 
5697 	/* Restore PCI confspace */
5698 	amdgpu_device_load_pci_state(pdev);
5699 
5700 	/* confirm  ASIC came out of reset */
5701 	for (i = 0; i < adev->usec_timeout; i++) {
5702 		memsize = amdgpu_asic_get_config_memsize(adev);
5703 
5704 		if (memsize != 0xffffffff)
5705 			break;
5706 		udelay(1);
5707 	}
5708 	if (memsize == 0xffffffff) {
5709 		r = -ETIME;
5710 		goto out;
5711 	}
5712 
5713 	reset_context.method = AMD_RESET_METHOD_NONE;
5714 	reset_context.reset_req_dev = adev;
5715 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5716 	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5717 
5718 	adev->no_hw_access = true;
5719 	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5720 	adev->no_hw_access = false;
5721 	if (r)
5722 		goto out;
5723 
5724 	r = amdgpu_do_asic_reset(&device_list, &reset_context);
5725 
5726 out:
5727 	if (!r) {
5728 		if (amdgpu_device_cache_pci_state(adev->pdev))
5729 			pci_restore_state(adev->pdev);
5730 
5731 		DRM_INFO("PCIe error recovery succeeded\n");
5732 	} else {
5733 		DRM_ERROR("PCIe error recovery failed, err:%d", r);
5734 		amdgpu_device_unset_mp1_state(adev);
5735 		amdgpu_device_unlock_reset_domain(adev->reset_domain);
5736 	}
5737 
5738 	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5739 }
5740 
5741 /**
5742  * amdgpu_pci_resume() - resume normal ops after PCI reset
5743  * @pdev: pointer to PCI device
5744  *
5745  * Called when the error recovery driver tells us that its
5746  * OK to resume normal operation.
5747  */
5748 void amdgpu_pci_resume(struct pci_dev *pdev)
5749 {
5750 	struct drm_device *dev = pci_get_drvdata(pdev);
5751 	struct amdgpu_device *adev = drm_to_adev(dev);
5752 	int i;
5753 
5754 
5755 	DRM_INFO("PCI error: resume callback!!\n");
5756 
5757 	/* Only continue execution for the case of pci_channel_io_frozen */
5758 	if (adev->pci_channel_state != pci_channel_io_frozen)
5759 		return;
5760 
5761 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5762 		struct amdgpu_ring *ring = adev->rings[i];
5763 
5764 		if (!ring || !ring->sched.thread)
5765 			continue;
5766 
5767 		drm_sched_start(&ring->sched, true);
5768 	}
5769 
5770 	amdgpu_device_unset_mp1_state(adev);
5771 	amdgpu_device_unlock_reset_domain(adev->reset_domain);
5772 }
5773 
5774 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5775 {
5776 	struct drm_device *dev = pci_get_drvdata(pdev);
5777 	struct amdgpu_device *adev = drm_to_adev(dev);
5778 	int r;
5779 
5780 	r = pci_save_state(pdev);
5781 	if (!r) {
5782 		kfree(adev->pci_state);
5783 
5784 		adev->pci_state = pci_store_saved_state(pdev);
5785 
5786 		if (!adev->pci_state) {
5787 			DRM_ERROR("Failed to store PCI saved state");
5788 			return false;
5789 		}
5790 	} else {
5791 		DRM_WARN("Failed to save PCI state, err:%d\n", r);
5792 		return false;
5793 	}
5794 
5795 	return true;
5796 }
5797 
5798 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5799 {
5800 	struct drm_device *dev = pci_get_drvdata(pdev);
5801 	struct amdgpu_device *adev = drm_to_adev(dev);
5802 	int r;
5803 
5804 	if (!adev->pci_state)
5805 		return false;
5806 
5807 	r = pci_load_saved_state(pdev, adev->pci_state);
5808 
5809 	if (!r) {
5810 		pci_restore_state(pdev);
5811 	} else {
5812 		DRM_WARN("Failed to load PCI state, err:%d\n", r);
5813 		return false;
5814 	}
5815 
5816 	return true;
5817 }
5818 
5819 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5820 		struct amdgpu_ring *ring)
5821 {
5822 #ifdef CONFIG_X86_64
5823 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5824 		return;
5825 #endif
5826 	if (adev->gmc.xgmi.connected_to_cpu)
5827 		return;
5828 
5829 	if (ring && ring->funcs->emit_hdp_flush)
5830 		amdgpu_ring_emit_hdp_flush(ring);
5831 	else
5832 		amdgpu_asic_flush_hdp(adev, ring);
5833 }
5834 
5835 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5836 		struct amdgpu_ring *ring)
5837 {
5838 #ifdef CONFIG_X86_64
5839 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5840 		return;
5841 #endif
5842 	if (adev->gmc.xgmi.connected_to_cpu)
5843 		return;
5844 
5845 	amdgpu_asic_invalidate_hdp(adev, ring);
5846 }
5847 
5848 int amdgpu_in_reset(struct amdgpu_device *adev)
5849 {
5850 	return atomic_read(&adev->reset_domain->in_gpu_reset);
5851 	}
5852 
5853 /**
5854  * amdgpu_device_halt() - bring hardware to some kind of halt state
5855  *
5856  * @adev: amdgpu_device pointer
5857  *
5858  * Bring hardware to some kind of halt state so that no one can touch it
5859  * any more. It will help to maintain error context when error occurred.
5860  * Compare to a simple hang, the system will keep stable at least for SSH
5861  * access. Then it should be trivial to inspect the hardware state and
5862  * see what's going on. Implemented as following:
5863  *
5864  * 1. drm_dev_unplug() makes device inaccessible to user space(IOCTLs, etc),
5865  *    clears all CPU mappings to device, disallows remappings through page faults
5866  * 2. amdgpu_irq_disable_all() disables all interrupts
5867  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5868  * 4. set adev->no_hw_access to avoid potential crashes after setp 5
5869  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5870  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5871  *    flush any in flight DMA operations
5872  */
5873 void amdgpu_device_halt(struct amdgpu_device *adev)
5874 {
5875 	struct pci_dev *pdev = adev->pdev;
5876 	struct drm_device *ddev = adev_to_drm(adev);
5877 
5878 	drm_dev_unplug(ddev);
5879 
5880 	amdgpu_irq_disable_all(adev);
5881 
5882 	amdgpu_fence_driver_hw_fini(adev);
5883 
5884 	adev->no_hw_access = true;
5885 
5886 	amdgpu_device_unmap_mmio(adev);
5887 
5888 	pci_disable_device(pdev);
5889 	pci_wait_for_pending_transaction(pdev);
5890 }
5891 
5892 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5893 				u32 reg)
5894 {
5895 	unsigned long flags, address, data;
5896 	u32 r;
5897 
5898 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5899 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5900 
5901 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5902 	WREG32(address, reg * 4);
5903 	(void)RREG32(address);
5904 	r = RREG32(data);
5905 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5906 	return r;
5907 }
5908 
5909 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5910 				u32 reg, u32 v)
5911 {
5912 	unsigned long flags, address, data;
5913 
5914 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5915 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5916 
5917 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5918 	WREG32(address, reg * 4);
5919 	(void)RREG32(address);
5920 	WREG32(data, v);
5921 	(void)RREG32(data);
5922 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5923 }
5924 
5925 /**
5926  * amdgpu_device_switch_gang - switch to a new gang
5927  * @adev: amdgpu_device pointer
5928  * @gang: the gang to switch to
5929  *
5930  * Try to switch to a new gang.
5931  * Returns: NULL if we switched to the new gang or a reference to the current
5932  * gang leader.
5933  */
5934 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
5935 					    struct dma_fence *gang)
5936 {
5937 	struct dma_fence *old = NULL;
5938 
5939 	do {
5940 		dma_fence_put(old);
5941 		rcu_read_lock();
5942 		old = dma_fence_get_rcu_safe(&adev->gang_submit);
5943 		rcu_read_unlock();
5944 
5945 		if (old == gang)
5946 			break;
5947 
5948 		if (!dma_fence_is_signaled(old))
5949 			return old;
5950 
5951 	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
5952 			 old, gang) != old);
5953 
5954 	dma_fence_put(old);
5955 	return NULL;
5956 }
5957 
5958 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
5959 {
5960 	switch (adev->asic_type) {
5961 #ifdef CONFIG_DRM_AMDGPU_SI
5962 	case CHIP_HAINAN:
5963 #endif
5964 	case CHIP_TOPAZ:
5965 		/* chips with no display hardware */
5966 		return false;
5967 #ifdef CONFIG_DRM_AMDGPU_SI
5968 	case CHIP_TAHITI:
5969 	case CHIP_PITCAIRN:
5970 	case CHIP_VERDE:
5971 	case CHIP_OLAND:
5972 #endif
5973 #ifdef CONFIG_DRM_AMDGPU_CIK
5974 	case CHIP_BONAIRE:
5975 	case CHIP_HAWAII:
5976 	case CHIP_KAVERI:
5977 	case CHIP_KABINI:
5978 	case CHIP_MULLINS:
5979 #endif
5980 	case CHIP_TONGA:
5981 	case CHIP_FIJI:
5982 	case CHIP_POLARIS10:
5983 	case CHIP_POLARIS11:
5984 	case CHIP_POLARIS12:
5985 	case CHIP_VEGAM:
5986 	case CHIP_CARRIZO:
5987 	case CHIP_STONEY:
5988 		/* chips with display hardware */
5989 		return true;
5990 	default:
5991 		/* IP discovery */
5992 		if (!adev->ip_versions[DCE_HWIP][0] ||
5993 		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
5994 			return false;
5995 		return true;
5996 	}
5997 }
5998