/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>
#include <linux/apple-gmux.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/device.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
#endif

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000
#define AMDGPU_MAX_RETRY_LIMIT		2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)

static const struct drm_driver amdgpu_kms_driver;

const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"ALDEBARAN",
	"NAVI10",
	"CYAN_SKILLFISH",
	"NAVI14",
	"NAVI12",
	"SIENNA_CICHLID",
	"NAVY_FLOUNDER",
	"VANGOGH",
	"DIMGREY_CAVEFISH",
	"BEIGE_GOBY",
	"YELLOW_CARP",
	"IP DISCOVERY",
	"LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */

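/*
 * Usage sketch (illustrative, not part of the driver): the attribute is
 * read from user space through sysfs, e.g.
 *
 *	cat /sys/class/drm/card0/device/pcie_replay_count
 *
 * The exact path depends on the card index and the PCI topology of the
 * system.
 */
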
static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, 0444,
		   amdgpu_device_get_pcie_replay_count, NULL);

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise returns false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	return amdgpu_asic_supports_baco(adev);
}

/**
 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 * smart shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
	return (amdgpu_device_supports_boco(dev) &&
		amdgpu_acpi_is_power_shift_control_supported());
}

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	uint64_t last;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *data++);
		else
			*data++ = RREG32_NO_KIQ(mmMM_DATA);
	}

	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	drm_dev_exit(idx);
}

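/*
 * Usage sketch (illustrative): read one dword of VRAM at a 4 byte
 * aligned offset; @pos and @size must both be dword aligned or the
 * BUG_ON() above fires.
 *
 *	u32 val;
 *
 *	amdgpu_device_mm_access(adev, pos, &val, sizeof(val), false);
 */
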
/**
 * amdgpu_device_aper_access - access vram by vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * Returns the number of bytes that have been transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			/* Make sure the HDP write cache flush happens without any
			 * reordering after the system memory contents are sent
			 * over PCIe to the device
			 */
			mb();
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			/* Make sure the HDP read cache is invalidated before
			 * issuing a read to the PCIe device
			 */
			mb();
			memcpy_fromio(buf, addr, count);
		}

	}

	return count;
#else
	return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
{
	size_t count;

	/* try using the VRAM aperture to access VRAM first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	size -= count;
	if (size) {
		/* use MM_INDEX/MM_DATA to access the rest of the VRAM */
		pos += count;
		buf += count;
		amdgpu_device_mm_access(adev, pos, buf, size, write);
	}
}

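/*
 * Usage sketch (illustrative): copy a CPU buffer into VRAM; the helper
 * prefers the CPU visible aperture and transparently falls back to
 * MM_INDEX/MM_DATA for any part that lies outside of it.
 *
 *	u32 data[4] = { };
 *
 *	amdgpu_device_vram_access(adev, vram_offset, data, sizeof(data), true);
 */
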
/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->no_hw_access)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore, if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (in_task()) {
		if (down_read_trylock(&adev->reset_domain->sem))
			up_read(&adev->reset_domain->sem);
		else
			lockdep_assert_held(&adev->reset_domain->sem);
	}
#endif
	return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}

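/*
 * Most callers do not use this helper directly; the RREG32()/WREG32()
 * style macros in amdgpu.h expand to it. A simplified sketch of that
 * hookup (see amdgpu.h for the real definitions):
 *
 *	#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
 *	#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
 */
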
/*
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}

/*
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}

/**
 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 * @xcc_id: xcc accelerated compute core id
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v,
			     uint32_t xcc_id)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
	} else if ((reg * 4) >= adev->rmmio_size) {
		adev->pcie_wreg(adev, reg * 4, v);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}

/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	u32 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

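/*
 * SoC code typically wires these index/data helpers into the asic
 * callbacks so that registers outside of the direct MMIO range can be
 * reached. A sketch of that hookup (illustrative, see e.g. nv.c/soc15.c
 * for the real assignments):
 *
 *	adev->pcie_rreg = &amdgpu_device_indirect_rreg;
 *	adev->pcie_wreg = &amdgpu_device_indirect_wreg;
 *	adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
 *	adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
 */
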
u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
				    u64 reg_addr)
{
	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if (adev->nbio.funcs->get_pcie_index_hi_offset)
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
	else
		pcie_index_hi = 0;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r = readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	u64 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_wreg - write an indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 reg_addr, u32 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
				     u64 reg_addr, u32 reg_data)
{
	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if (adev->nbio.funcs->get_pcie_index_hi_offset)
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
	else
		pcie_index_hi = 0;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_get_rev_id - query device rev_id
 *
 * @adev: amdgpu_device pointer
 *
 * Return device rev_id
 */
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	int ret;

	amdgpu_asic_pre_asic_init(adev);

	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) ||
	    adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {
		amdgpu_psp_wait_for_bootloader(adev);
		ret = amdgpu_atomfirmware_asic_init(adev, true);
		return ret;
	} else {
		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
	}

	return 0;
}

/**
 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &adev->mem_scratch.robj,
				       &adev->mem_scratch.gpu_addr,
				       (void **)&adev->mem_scratch.ptr);
}

/**
 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND/OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

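/*
 * Example golden register array (hypothetical offsets/masks): entries are
 * {offset, and_mask, or_mask} triplets, which is why @array_size must be
 * a multiple of 3:
 *
 *	static const u32 golden_settings_example[] = {
 *		0x1234, 0x0000ffff, 0x00000042,
 *		0x1238, 0xffffffff, 0xdeadbeef,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *						ARRAY_SIZE(golden_settings_example));
 */
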
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics).
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

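/*
 * Usage sketch (illustrative): ring code typically reserves a writeback
 * slot for its read pointer, converts the returned dword offset into a
 * GPU address, and frees the slot again on teardown:
 *
 *	u32 rptr_offs;
 *	u64 gpu_addr;
 *
 *	if (amdgpu_device_wb_get(adev, &rptr_offs))
 *		return -EINVAL;
 *	gpu_addr = adev->wb.gpu_addr + (rptr_offs * 4);
 *	...
 *	amdgpu_device_wb_free(adev, rptr_offs);
 */
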
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	unsigned int i;
	u16 cmd;
	int r;

	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		return 0;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
			rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
{
	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
		return false;

	return true;
}

/*
 * GPU helpers function.
 */
/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if posting is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (!amdgpu_device_read_bios(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after a VM
		 * reboot some old smc fw still needs the driver to do vPost, otherwise
		 * the gpu hangs; smc fw versions above 22.15 don't have this flaw, so
		 * we force vPost to be executed for smc versions below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			release_firmware(adev->pm.fw);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset the whole hive on init */
	if (adev->gmc.xgmi.pending_reset)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/*
 * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
 * speed switching. Until we have confirmation from Intel that a specific host
 * supports it, it's safer that we keep it disabled for all.
 *
 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
 */
bool amdgpu_device_pcie_dynamic_switching_supported(void)
{
#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (c->x86_vendor == X86_VENDOR_INTEL)
		return false;
#endif
	return true;
}

/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *
 * @adev: amdgpu_device pointer
 *
 * Confirm whether the module parameter and pcie bridge agree that ASPM should
 * be set for this device.
 *
 * Returns true if it should be used or false if not.
 */
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
{
	switch (amdgpu_aspm) {
	case -1:
		break;
	case 0:
		return false;
	case 1:
		return true;
	default:
		return false;
	}
	return pcie_aspm_enabled(adev->pdev);
}

bool amdgpu_device_aspm_support_quirk(void)
{
#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
#else
	return true;
#endif
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
						 bool state)
{
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory: a page is 4KB, so we have a 12 bit offset, a minimum of 9
 * bits in the page table, and the remaining bits in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory
	 */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
	    (amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		   (amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
	if (!(adev->flags & AMD_IS_APU) ||
	    adev->asic_type < CHIP_RAVEN)
		return 0;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		break;
	case CHIP_RENOIR:
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
		break;
	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case CHIP_YELLOW_CARP:
		break;
	case CHIP_CYAN_SKILLFISH:
		if ((adev->pdev->device == 0x13FE) ||
		    (adev->pdev->device == 0x143F))
			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
		break;
	default:
		break;
	}

	return 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater than or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater than or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	if (amdgpu_sched_hw_submission < 2) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = 2;
	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
	}

	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
		amdgpu_reset_method = -1;
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	return 0;
}

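/*
 * The values validated above come from amdgpu module parameters, e.g.
 * set on the kernel command line (illustrative values):
 *
 *	amdgpu.sched_jobs=64 amdgpu.vm_fragment_size=9 amdgpu.gartsize=512
 */
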
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		pci_set_power_state(pdev, PCI_D0);
		amdgpu_device_load_pci_state(pdev);
		r = pci_enable_device(pdev);
		if (r)
			DRM_WARN("pci_enable_device failed (%d)\n", r);
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_prepare(dev);
		amdgpu_device_suspend(dev, true);
		amdgpu_device_cache_pci_state(pdev);
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}

/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

1755 /**
1756 * amdgpu_device_ip_block_version_cmp
1757 *
1758 * @adev: amdgpu_device pointer
1759 * @type: enum amd_ip_block_type
1760 * @major: major version
1761 * @minor: minor version
1762 *
1763 * return 0 if equal or greater
1764 * return 1 if smaller or the ip_block doesn't exist
1765 */
amdgpu_device_ip_block_version_cmp(struct amdgpu_device * adev,enum amd_ip_block_type type,u32 major,u32 minor)1766 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1767 enum amd_ip_block_type type,
1768 u32 major, u32 minor)
1769 {
1770 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1771
1772 if (ip_block && ((ip_block->version->major > major) ||
1773 ((ip_block->version->major == major) &&
1774 (ip_block->version->minor >= minor))))
1775 return 0;
1776
1777 return 1;
1778 }
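
/*
 * Illustrative sketch (hypothetical caller): because 0 means "equal or
 * greater", a minimum-version check reads as a negation:
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *						8, 1))
 *		enable_new_path(adev);	/. hypothetical helper ./
 */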
1779
1780 /**
1781 * amdgpu_device_ip_block_add
1782 *
1783 * @adev: amdgpu_device pointer
1784 * @ip_block_version: pointer to the IP to add
1785 *
1786 * Adds the IP block driver information to the collection of IPs
1787 * on the asic.
1788 */
1789 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1790 const struct amdgpu_ip_block_version *ip_block_version)
1791 {
1792 if (!ip_block_version)
1793 return -EINVAL;
1794
1795 switch (ip_block_version->type) {
1796 case AMD_IP_BLOCK_TYPE_VCN:
1797 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1798 return 0;
1799 break;
1800 case AMD_IP_BLOCK_TYPE_JPEG:
1801 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1802 return 0;
1803 break;
1804 default:
1805 break;
1806 }
1807
1808 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1809 ip_block_version->funcs->name);
1810
1811 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1812
1813 return 0;
1814 }
1815
1816 /**
1817 * amdgpu_device_enable_virtual_display - enable virtual display feature
1818 *
1819 * @adev: amdgpu_device pointer
1820 *
1821 * Enables the virtual display feature if the user has enabled it via
1822 * the module parameter virtual_display. This feature provides a virtual
1823 * display hardware on headless boards or in virtualized environments.
1824 * This function parses and validates the configuration string specified by
1825 * the user and configures the virtual display configuration (number of
1826 * virtual connectors, crtcs, etc.) specified.
1827 */
1828 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1829 {
1830 adev->enable_virtual_display = false;
1831
1832 if (amdgpu_virtual_display) {
1833 const char *pci_address_name = pci_name(adev->pdev);
1834 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1835
1836 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1837 pciaddstr_tmp = pciaddstr;
1838 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1839 pciaddname = strsep(&pciaddname_tmp, ",");
1840 if (!strcmp("all", pciaddname)
1841 || !strcmp(pci_address_name, pciaddname)) {
1842 long num_crtc;
1843 int res = -1;
1844
1845 adev->enable_virtual_display = true;
1846
1847 if (pciaddname_tmp)
1848 res = kstrtol(pciaddname_tmp, 10,
1849 &num_crtc);
1850
1851 if (!res) {
1852 if (num_crtc < 1)
1853 num_crtc = 1;
1854 if (num_crtc > 6)
1855 num_crtc = 6;
1856 adev->mode_info.num_crtc = num_crtc;
1857 } else {
1858 adev->mode_info.num_crtc = 1;
1859 }
1860 break;
1861 }
1862 }
1863
1864 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1865 amdgpu_virtual_display, pci_address_name,
1866 adev->enable_virtual_display, adev->mode_info.num_crtc);
1867
1868 kfree(pciaddstr);
1869 }
1870 }
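
/*
 * Example command lines, derived from the parsing above (entries are
 * separated by ';', the optional crtc count follows the PCI address after
 * a ',' and is clamped to 1..6):
 *
 *	amdgpu.virtual_display=0000:01:00.0,2	(two virtual crtcs on one GPU)
 *	amdgpu.virtual_display=all,1		(one virtual crtc on every GPU)
 */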
1871
1872 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
1873 {
1874 if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
1875 adev->mode_info.num_crtc = 1;
1876 adev->enable_virtual_display = true;
1877 DRM_INFO("virtual_display:%d, num_crtc:%d\n",
1878 adev->enable_virtual_display, adev->mode_info.num_crtc);
1879 }
1880 }
1881
1882 /**
1883 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1884 *
1885 * @adev: amdgpu_device pointer
1886 *
1887 * Parses the asic configuration parameters specified in the gpu info
1888 * firmware and makes them available to the driver for use in configuring
1889 * the asic.
1890 * Returns 0 on success, -EINVAL on failure.
1891 */
1892 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1893 {
1894 const char *chip_name;
1895 char fw_name[40];
1896 int err;
1897 const struct gpu_info_firmware_header_v1_0 *hdr;
1898
1899 adev->firmware.gpu_info_fw = NULL;
1900
1901 if (adev->mman.discovery_bin)
1902 return 0;
1903
1904 switch (adev->asic_type) {
1905 default:
1906 return 0;
1907 case CHIP_VEGA10:
1908 chip_name = "vega10";
1909 break;
1910 case CHIP_VEGA12:
1911 chip_name = "vega12";
1912 break;
1913 case CHIP_RAVEN:
1914 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1915 chip_name = "raven2";
1916 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1917 chip_name = "picasso";
1918 else
1919 chip_name = "raven";
1920 break;
1921 case CHIP_ARCTURUS:
1922 chip_name = "arcturus";
1923 break;
1924 case CHIP_NAVI12:
1925 chip_name = "navi12";
1926 break;
1927 }
1928
1929 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1930 err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
1931 if (err) {
1932 dev_err(adev->dev,
1933 "Failed to get gpu_info firmware \"%s\"\n",
1934 fw_name);
1935 goto out;
1936 }
1937
1938 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1939 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1940
1941 switch (hdr->version_major) {
1942 case 1:
1943 {
1944 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1945 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1946 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1947
1948 /*
1949 * Should be dropped when DAL no longer needs it.
1950 */
1951 if (adev->asic_type == CHIP_NAVI12)
1952 goto parse_soc_bounding_box;
1953
1954 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1955 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1956 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1957 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1958 adev->gfx.config.max_texture_channel_caches =
1959 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1960 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1961 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1962 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1963 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1964 adev->gfx.config.double_offchip_lds_buf =
1965 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1966 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1967 adev->gfx.cu_info.max_waves_per_simd =
1968 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1969 adev->gfx.cu_info.max_scratch_slots_per_cu =
1970 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1971 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1972 if (hdr->version_minor >= 1) {
1973 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1974 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1975 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1976 adev->gfx.config.num_sc_per_sh =
1977 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1978 adev->gfx.config.num_packer_per_sc =
1979 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1980 }
1981
1982 parse_soc_bounding_box:
1983 /*
1984 * soc bounding box info is not integrated into the discovery table,
1985 * so we always need to parse it from the gpu info firmware if needed.
1986 */
1987 if (hdr->version_minor == 2) {
1988 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1989 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1990 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1991 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1992 }
1993 break;
1994 }
1995 default:
1996 dev_err(adev->dev,
1997 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1998 err = -EINVAL;
1999 goto out;
2000 }
2001 out:
2002 return err;
2003 }
2004
2005 /**
2006 * amdgpu_device_ip_early_init - run early init for hardware IPs
2007 *
2008 * @adev: amdgpu_device pointer
2009 *
2010 * Early initialization pass for hardware IPs. The hardware IPs that make
2011 * up each asic are discovered and each IP's early_init callback is run. This
2012 * is the first stage in initializing the asic.
2013 * Returns 0 on success, negative error code on failure.
2014 */
2015 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2016 {
2017 struct pci_dev *parent;
2018 int i, r;
2019 bool total;
2020
2021 amdgpu_device_enable_virtual_display(adev);
2022
2023 if (amdgpu_sriov_vf(adev)) {
2024 r = amdgpu_virt_request_full_gpu(adev, true);
2025 if (r)
2026 return r;
2027 }
2028
2029 switch (adev->asic_type) {
2030 #ifdef CONFIG_DRM_AMDGPU_SI
2031 case CHIP_VERDE:
2032 case CHIP_TAHITI:
2033 case CHIP_PITCAIRN:
2034 case CHIP_OLAND:
2035 case CHIP_HAINAN:
2036 adev->family = AMDGPU_FAMILY_SI;
2037 r = si_set_ip_blocks(adev);
2038 if (r)
2039 return r;
2040 break;
2041 #endif
2042 #ifdef CONFIG_DRM_AMDGPU_CIK
2043 case CHIP_BONAIRE:
2044 case CHIP_HAWAII:
2045 case CHIP_KAVERI:
2046 case CHIP_KABINI:
2047 case CHIP_MULLINS:
2048 if (adev->flags & AMD_IS_APU)
2049 adev->family = AMDGPU_FAMILY_KV;
2050 else
2051 adev->family = AMDGPU_FAMILY_CI;
2052
2053 r = cik_set_ip_blocks(adev);
2054 if (r)
2055 return r;
2056 break;
2057 #endif
2058 case CHIP_TOPAZ:
2059 case CHIP_TONGA:
2060 case CHIP_FIJI:
2061 case CHIP_POLARIS10:
2062 case CHIP_POLARIS11:
2063 case CHIP_POLARIS12:
2064 case CHIP_VEGAM:
2065 case CHIP_CARRIZO:
2066 case CHIP_STONEY:
2067 if (adev->flags & AMD_IS_APU)
2068 adev->family = AMDGPU_FAMILY_CZ;
2069 else
2070 adev->family = AMDGPU_FAMILY_VI;
2071
2072 r = vi_set_ip_blocks(adev);
2073 if (r)
2074 return r;
2075 break;
2076 default:
2077 r = amdgpu_discovery_set_ip_blocks(adev);
2078 if (r)
2079 return r;
2080 break;
2081 }
2082
2083 if (amdgpu_has_atpx() &&
2084 (amdgpu_is_atpx_hybrid() ||
2085 amdgpu_has_atpx_dgpu_power_cntl()) &&
2086 ((adev->flags & AMD_IS_APU) == 0) &&
2087 !dev_is_removable(&adev->pdev->dev))
2088 adev->flags |= AMD_IS_PX;
2089
2090 if (!(adev->flags & AMD_IS_APU)) {
2091 parent = pcie_find_root_port(adev->pdev);
2092 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2093 }
2094
2096 adev->pm.pp_feature = amdgpu_pp_feature_mask;
2097 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2098 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2099 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2100 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2101 if (!amdgpu_device_pcie_dynamic_switching_supported())
2102 adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
2103
2104 total = true;
2105 for (i = 0; i < adev->num_ip_blocks; i++) {
2106 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2107 DRM_WARN("disabled ip block: %d <%s>\n",
2108 i, adev->ip_blocks[i].version->funcs->name);
2109 adev->ip_blocks[i].status.valid = false;
2110 } else {
2111 if (adev->ip_blocks[i].version->funcs->early_init) {
2112 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2113 if (r == -ENOENT) {
2114 adev->ip_blocks[i].status.valid = false;
2115 } else if (r) {
2116 DRM_ERROR("early_init of IP block <%s> failed %d\n",
2117 adev->ip_blocks[i].version->funcs->name, r);
2118 total = false;
2119 } else {
2120 adev->ip_blocks[i].status.valid = true;
2121 }
2122 } else {
2123 adev->ip_blocks[i].status.valid = true;
2124 }
2125 }
2126 /* get the vbios after the asic_funcs are set up */
2127 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2128 r = amdgpu_device_parse_gpu_info_fw(adev);
2129 if (r)
2130 return r;
2131
2132 /* Read BIOS */
2133 if (amdgpu_device_read_bios(adev)) {
2134 if (!amdgpu_get_bios(adev))
2135 return -EINVAL;
2136
2137 r = amdgpu_atombios_init(adev);
2138 if (r) {
2139 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2140 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2141 return r;
2142 }
2143 }
2144
2145 /* get pf2vf msg info at its earliest time */
2146 if (amdgpu_sriov_vf(adev))
2147 amdgpu_virt_init_data_exchange(adev);
2148
2149 }
2150 }
2151 if (!total)
2152 return -ENODEV;
2153
2154 amdgpu_amdkfd_device_probe(adev);
2155 adev->cg_flags &= amdgpu_cg_mask;
2156 adev->pg_flags &= amdgpu_pg_mask;
2157
2158 return 0;
2159 }
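
/*
 * Illustrative: the per-index disabling in the loop above is driven by the
 * ip_block_mask module parameter; e.g. booting with
 *
 *	amdgpu.ip_block_mask=0xffffffef
 *
 * clears bit 4 and disables the fifth IP block in enumeration order. This
 * is a debugging aid only; most blocks cannot safely be disabled.
 */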
2160
2161 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2162 {
2163 int i, r;
2164
2165 for (i = 0; i < adev->num_ip_blocks; i++) {
2166 if (!adev->ip_blocks[i].status.sw)
2167 continue;
2168 if (adev->ip_blocks[i].status.hw)
2169 continue;
2170 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2171 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2172 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2173 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2174 if (r) {
2175 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2176 adev->ip_blocks[i].version->funcs->name, r);
2177 return r;
2178 }
2179 adev->ip_blocks[i].status.hw = true;
2180 }
2181 }
2182
2183 return 0;
2184 }
2185
2186 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2187 {
2188 int i, r;
2189
2190 for (i = 0; i < adev->num_ip_blocks; i++) {
2191 if (!adev->ip_blocks[i].status.sw)
2192 continue;
2193 if (adev->ip_blocks[i].status.hw)
2194 continue;
2195 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2196 if (r) {
2197 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2198 adev->ip_blocks[i].version->funcs->name, r);
2199 return r;
2200 }
2201 adev->ip_blocks[i].status.hw = true;
2202 }
2203
2204 return 0;
2205 }
2206
2207 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2208 {
2209 int r = 0;
2210 int i;
2211 uint32_t smu_version;
2212
2213 if (adev->asic_type >= CHIP_VEGA10) {
2214 for (i = 0; i < adev->num_ip_blocks; i++) {
2215 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2216 continue;
2217
2218 if (!adev->ip_blocks[i].status.sw)
2219 continue;
2220
2221 /* no need to do the fw loading again if already done */
2222 if (adev->ip_blocks[i].status.hw)
2223 break;
2224
2225 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2226 r = adev->ip_blocks[i].version->funcs->resume(adev);
2227 if (r) {
2228 DRM_ERROR("resume of IP block <%s> failed %d\n",
2229 adev->ip_blocks[i].version->funcs->name, r);
2230 return r;
2231 }
2232 } else {
2233 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2234 if (r) {
2235 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2236 adev->ip_blocks[i].version->funcs->name, r);
2237 return r;
2238 }
2239 }
2240
2241 adev->ip_blocks[i].status.hw = true;
2242 break;
2243 }
2244 }
2245
2246 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2247 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2248
2249 return r;
2250 }
2251
2252 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2253 {
2254 long timeout;
2255 int r, i;
2256
2257 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2258 struct amdgpu_ring *ring = adev->rings[i];
2259
2260 /* No need to setup the GPU scheduler for rings that don't need it */
2261 if (!ring || ring->no_scheduler)
2262 continue;
2263
2264 switch (ring->funcs->type) {
2265 case AMDGPU_RING_TYPE_GFX:
2266 timeout = adev->gfx_timeout;
2267 break;
2268 case AMDGPU_RING_TYPE_COMPUTE:
2269 timeout = adev->compute_timeout;
2270 break;
2271 case AMDGPU_RING_TYPE_SDMA:
2272 timeout = adev->sdma_timeout;
2273 break;
2274 default:
2275 timeout = adev->video_timeout;
2276 break;
2277 }
2278
2279 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2280 ring->num_hw_submission, 0,
2281 timeout, adev->reset_domain->wq,
2282 ring->sched_score, ring->name,
2283 adev->dev);
2284 if (r) {
2285 DRM_ERROR("Failed to create scheduler on ring %s.\n",
2286 ring->name);
2287 return r;
2288 }
2289 }
2290
2291 amdgpu_xcp_update_partition_sched_list(adev);
2292
2293 return 0;
2294 }
2295
2296
2297 /**
2298 * amdgpu_device_ip_init - run init for hardware IPs
2299 *
2300 * @adev: amdgpu_device pointer
2301 *
2302 * Main initialization pass for hardware IPs. The list of all the hardware
2303 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2304 * are run. sw_init initializes the software state associated with each IP
2305 * and hw_init initializes the hardware associated with each IP.
2306 * Returns 0 on success, negative error code on failure.
2307 */
2308 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2309 {
2310 int i, r;
2311
2312 r = amdgpu_ras_init(adev);
2313 if (r)
2314 return r;
2315
2316 for (i = 0; i < adev->num_ip_blocks; i++) {
2317 if (!adev->ip_blocks[i].status.valid)
2318 continue;
2319 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2320 if (r) {
2321 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2322 adev->ip_blocks[i].version->funcs->name, r);
2323 goto init_failed;
2324 }
2325 adev->ip_blocks[i].status.sw = true;
2326
2327 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2328 /* need to do common hw init early so everything is set up for gmc */
2329 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2330 if (r) {
2331 DRM_ERROR("hw_init %d failed %d\n", i, r);
2332 goto init_failed;
2333 }
2334 adev->ip_blocks[i].status.hw = true;
2335 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2336 /* need to do gmc hw init early so we can allocate gpu mem */
2337 /* Try to reserve bad pages early */
2338 if (amdgpu_sriov_vf(adev))
2339 amdgpu_virt_exchange_data(adev);
2340
2341 r = amdgpu_device_mem_scratch_init(adev);
2342 if (r) {
2343 DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
2344 goto init_failed;
2345 }
2346 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2347 if (r) {
2348 DRM_ERROR("hw_init %d failed %d\n", i, r);
2349 goto init_failed;
2350 }
2351 r = amdgpu_device_wb_init(adev);
2352 if (r) {
2353 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2354 goto init_failed;
2355 }
2356 adev->ip_blocks[i].status.hw = true;
2357
2358 /* right after GMC hw init, we create CSA */
2359 if (adev->gfx.mcbp) {
2360 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2361 AMDGPU_GEM_DOMAIN_VRAM |
2362 AMDGPU_GEM_DOMAIN_GTT,
2363 AMDGPU_CSA_SIZE);
2364 if (r) {
2365 DRM_ERROR("allocate CSA failed %d\n", r);
2366 goto init_failed;
2367 }
2368 }
2369 }
2370 }
2371
2372 if (amdgpu_sriov_vf(adev))
2373 amdgpu_virt_init_data_exchange(adev);
2374
2375 r = amdgpu_ib_pool_init(adev);
2376 if (r) {
2377 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2378 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2379 goto init_failed;
2380 }
2381
2382 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2383 if (r)
2384 goto init_failed;
2385
2386 r = amdgpu_device_ip_hw_init_phase1(adev);
2387 if (r)
2388 goto init_failed;
2389
2390 r = amdgpu_device_fw_loading(adev);
2391 if (r)
2392 goto init_failed;
2393
2394 r = amdgpu_device_ip_hw_init_phase2(adev);
2395 if (r)
2396 goto init_failed;
2397
2398 /*
2399 * Retired pages will be loaded from eeprom and reserved here;
2400 * this should be called after amdgpu_device_ip_hw_init_phase2 since,
2401 * for some ASICs, the RAS EEPROM code relies on the SMU being fully
2402 * functional for I2C communication, which is only true at this point.
2403 *
2404 * amdgpu_ras_recovery_init may fail, but the upper layers only care
2405 * about failures caused by a bad gpu situation, stopping the amdgpu
2406 * init process accordingly. For other failure cases it will still
2407 * release all the resources and print an error message, rather than
2408 * returning a negative value to the upper level.
2409 *
2410 * Note: theoretically, this should be called before all vram allocations
2411 * to protect retired pages from being abused.
2412 */
2413 r = amdgpu_ras_recovery_init(adev);
2414 if (r)
2415 goto init_failed;
2416
2417 /*
2418 * In case of XGMI, grab an extra reference on the reset domain for this device
2419 */
2420 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2421 if (amdgpu_xgmi_add_device(adev) == 0) {
2422 if (!amdgpu_sriov_vf(adev)) {
2423 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2424
2425 if (WARN_ON(!hive)) {
2426 r = -ENOENT;
2427 goto init_failed;
2428 }
2429
2430 if (!hive->reset_domain ||
2431 !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2432 r = -ENOENT;
2433 amdgpu_put_xgmi_hive(hive);
2434 goto init_failed;
2435 }
2436
2437 /* Drop the early temporary reset domain we created for device */
2438 amdgpu_reset_put_reset_domain(adev->reset_domain);
2439 adev->reset_domain = hive->reset_domain;
2440 amdgpu_put_xgmi_hive(hive);
2441 }
2442 }
2443 }
2444
2445 r = amdgpu_device_init_schedulers(adev);
2446 if (r)
2447 goto init_failed;
2448
2449 /* Don't init kfd if whole hive need to be reset during init */
2450 if (!adev->gmc.xgmi.pending_reset) {
2451 kgd2kfd_init_zone_device(adev);
2452 amdgpu_amdkfd_device_init(adev);
2453 }
2454
2455 amdgpu_fru_get_product_info(adev);
2456
2457 init_failed:
2458
2459 return r;
2460 }
2461
2462 /**
2463 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2464 *
2465 * @adev: amdgpu_device pointer
2466 *
2467 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2468 * this function before a GPU reset. If the value is retained after a
2469 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2470 */
2471 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2472 {
2473 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2474 }
2475
2476 /**
2477 * amdgpu_device_check_vram_lost - check if vram is valid
2478 *
2479 * @adev: amdgpu_device pointer
2480 *
2481 * Checks the reset magic value written to the gart pointer in VRAM.
2482 * The driver calls this after a GPU reset to see if the contents of
2483 * VRAM have been lost or not.
2484 * Returns true if vram is lost, false if not.
2485 */
2486 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2487 {
2488 if (memcmp(adev->gart.ptr, adev->reset_magic,
2489 AMDGPU_RESET_MAGIC_NUM))
2490 return true;
2491
2492 if (!amdgpu_in_reset(adev))
2493 return false;
2494
2495 /*
2496 * For all ASICs with baco/mode1 reset, the VRAM is
2497 * always assumed to be lost.
2498 */
2499 switch (amdgpu_asic_reset_method(adev)) {
2500 case AMD_RESET_METHOD_BACO:
2501 case AMD_RESET_METHOD_MODE1:
2502 return true;
2503 default:
2504 return false;
2505 }
2506 }
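
/*
 * Illustrative reset-flow sketch showing how the two helpers above pair up
 * (simplified; the real sequencing lives in the reset handlers):
 *
 *	amdgpu_device_fill_reset_magic(adev);
 *	... reset the ASIC ...
 *	if (amdgpu_device_check_vram_lost(adev))
 *		... recreate/restore VRAM buffer contents ...
 */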
2507
2508 /**
2509 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2510 *
2511 * @adev: amdgpu_device pointer
2512 * @state: clockgating state (gate or ungate)
2513 *
2514 * The list of all the hardware IPs that make up the asic is walked and the
2515 * set_clockgating_state callbacks are run.
2516 * During late init this is used to enable clockgating for hardware IPs;
2517 * during fini or suspend it is used to disable clockgating.
2518 * Returns 0 on success, negative error code on failure.
2519 */
2520
2521 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2522 enum amd_clockgating_state state)
2523 {
2524 int i, j, r;
2525
2526 if (amdgpu_emu_mode == 1)
2527 return 0;
2528
2529 for (j = 0; j < adev->num_ip_blocks; j++) {
2530 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2531 if (!adev->ip_blocks[i].status.late_initialized)
2532 continue;
2533 /* skip CG for GFX, SDMA on S0ix */
2534 if (adev->in_s0ix &&
2535 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2536 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2537 continue;
2538 /* skip CG for VCE/UVD, it's handled specially */
2539 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2540 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2541 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2542 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2543 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2544 /* enable clockgating to save power */
2545 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2546 state);
2547 if (r) {
2548 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2549 adev->ip_blocks[i].version->funcs->name, r);
2550 return r;
2551 }
2552 }
2553 }
2554
2555 return 0;
2556 }
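
/*
 * Note on the traversal order above (mirrored in the powergating variant
 * below): gating walks the IP list front to back, while ungating walks it
 * back to front (i = num_ip_blocks - j - 1), so gating is always torn down
 * in the reverse order it was applied.
 */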
2557
2558 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2559 enum amd_powergating_state state)
2560 {
2561 int i, j, r;
2562
2563 if (amdgpu_emu_mode == 1)
2564 return 0;
2565
2566 for (j = 0; j < adev->num_ip_blocks; j++) {
2567 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2568 if (!adev->ip_blocks[i].status.late_initialized)
2569 continue;
2570 /* skip PG for GFX, SDMA on S0ix */
2571 if (adev->in_s0ix &&
2572 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2573 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2574 continue;
2575 /* skip PG for VCE/UVD, it's handled specially */
2576 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2577 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2578 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2579 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2580 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2581 /* enable powergating to save power */
2582 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2583 state);
2584 if (r) {
2585 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2586 adev->ip_blocks[i].version->funcs->name, r);
2587 return r;
2588 }
2589 }
2590 }
2591 return 0;
2592 }
2593
2594 static int amdgpu_device_enable_mgpu_fan_boost(void)
2595 {
2596 struct amdgpu_gpu_instance *gpu_ins;
2597 struct amdgpu_device *adev;
2598 int i, ret = 0;
2599
2600 mutex_lock(&mgpu_info.mutex);
2601
2602 /*
2603 * MGPU fan boost feature should be enabled
2604 * only when there are two or more dGPUs in
2605 * the system
2606 */
2607 if (mgpu_info.num_dgpu < 2)
2608 goto out;
2609
2610 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2611 gpu_ins = &(mgpu_info.gpu_ins[i]);
2612 adev = gpu_ins->adev;
2613 if (!(adev->flags & AMD_IS_APU) &&
2614 !gpu_ins->mgpu_fan_enabled) {
2615 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2616 if (ret)
2617 break;
2618
2619 gpu_ins->mgpu_fan_enabled = 1;
2620 }
2621 }
2622
2623 out:
2624 mutex_unlock(&mgpu_info.mutex);
2625
2626 return ret;
2627 }
2628
2629 /**
2630 * amdgpu_device_ip_late_init - run late init for hardware IPs
2631 *
2632 * @adev: amdgpu_device pointer
2633 *
2634 * Late initialization pass for hardware IPs. The list of all the hardware
2635 * IPs that make up the asic is walked and the late_init callbacks are run.
2636 * late_init covers any special initialization that an IP requires
2637 * after all of them have been initialized or something that needs to happen
2638 * late in the init process.
2639 * Returns 0 on success, negative error code on failure.
2640 */
2641 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2642 {
2643 struct amdgpu_gpu_instance *gpu_instance;
2644 int i = 0, r;
2645
2646 for (i = 0; i < adev->num_ip_blocks; i++) {
2647 if (!adev->ip_blocks[i].status.hw)
2648 continue;
2649 if (adev->ip_blocks[i].version->funcs->late_init) {
2650 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2651 if (r) {
2652 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2653 adev->ip_blocks[i].version->funcs->name, r);
2654 return r;
2655 }
2656 }
2657 adev->ip_blocks[i].status.late_initialized = true;
2658 }
2659
2660 r = amdgpu_ras_late_init(adev);
2661 if (r) {
2662 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2663 return r;
2664 }
2665
2666 amdgpu_ras_set_error_query_ready(adev, true);
2667
2668 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2669 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2670
2671 amdgpu_device_fill_reset_magic(adev);
2672
2673 r = amdgpu_device_enable_mgpu_fan_boost();
2674 if (r)
2675 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2676
2677 /* For passthrough configurations on arcturus and aldebaran, enable special SBR handling */
2678 if (amdgpu_passthrough(adev) &&
2679 ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2680 adev->asic_type == CHIP_ALDEBARAN))
2681 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2682
2683 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2684 mutex_lock(&mgpu_info.mutex);
2685
2686 /*
2687 * Reset device p-state to low as this was booted with high.
2688 *
2689 * This should be performed only after all devices from the same
2690 * hive get initialized.
2691 *
2692 * However, the number of devices in the hive is not known in advance,
2693 * as it is counted one by one during device initialization.
2694 *
2695 * So, we wait for all XGMI interlinked devices initialized.
2696 * This may bring some delays as those devices may come from
2697 * different hives. But that should be OK.
2698 */
2699 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2700 for (i = 0; i < mgpu_info.num_gpu; i++) {
2701 gpu_instance = &(mgpu_info.gpu_ins[i]);
2702 if (gpu_instance->adev->flags & AMD_IS_APU)
2703 continue;
2704
2705 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2706 AMDGPU_XGMI_PSTATE_MIN);
2707 if (r) {
2708 DRM_ERROR("pstate setting failed (%d).\n", r);
2709 break;
2710 }
2711 }
2712 }
2713
2714 mutex_unlock(&mgpu_info.mutex);
2715 }
2716
2717 return 0;
2718 }
2719
2720 /**
2721 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2722 *
2723 * @adev: amdgpu_device pointer
2724 *
2725 * For ASICs that need to disable the SMC first
2726 */
2727 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2728 {
2729 int i, r;
2730
2731 if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2732 return;
2733
2734 for (i = 0; i < adev->num_ip_blocks; i++) {
2735 if (!adev->ip_blocks[i].status.hw)
2736 continue;
2737 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2738 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2739 /* XXX handle errors */
2740 if (r) {
2741 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2742 adev->ip_blocks[i].version->funcs->name, r);
2743 }
2744 adev->ip_blocks[i].status.hw = false;
2745 break;
2746 }
2747 }
2748 }
2749
2750 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2751 {
2752 int i, r;
2753
2754 for (i = 0; i < adev->num_ip_blocks; i++) {
2755 if (!adev->ip_blocks[i].version->funcs->early_fini)
2756 continue;
2757
2758 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2759 if (r) {
2760 DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2761 adev->ip_blocks[i].version->funcs->name, r);
2762 }
2763 }
2764
2765 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2766 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2767
2768 amdgpu_amdkfd_suspend(adev, false);
2769
2770 /* Workaround for ASICs that need to disable the SMC first */
2771 amdgpu_device_smu_fini_early(adev);
2772
2773 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2774 if (!adev->ip_blocks[i].status.hw)
2775 continue;
2776
2777 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2778 /* XXX handle errors */
2779 if (r) {
2780 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2781 adev->ip_blocks[i].version->funcs->name, r);
2782 }
2783
2784 adev->ip_blocks[i].status.hw = false;
2785 }
2786
2787 if (amdgpu_sriov_vf(adev)) {
2788 if (amdgpu_virt_release_full_gpu(adev, false))
2789 DRM_ERROR("failed to release exclusive mode on fini\n");
2790 }
2791
2792 return 0;
2793 }
2794
2795 /**
2796 * amdgpu_device_ip_fini - run fini for hardware IPs
2797 *
2798 * @adev: amdgpu_device pointer
2799 *
2800 * Main teardown pass for hardware IPs. The list of all the hardware
2801 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2802 * are run. hw_fini tears down the hardware associated with each IP
2803 * and sw_fini tears down any software state associated with each IP.
2804 * Returns 0 on success, negative error code on failure.
2805 */
2806 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2807 {
2808 int i, r;
2809
2810 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2811 amdgpu_virt_release_ras_err_handler_data(adev);
2812
2813 if (adev->gmc.xgmi.num_physical_nodes > 1)
2814 amdgpu_xgmi_remove_device(adev);
2815
2816 amdgpu_amdkfd_device_fini_sw(adev);
2817
2818 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2819 if (!adev->ip_blocks[i].status.sw)
2820 continue;
2821
2822 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2823 amdgpu_ucode_free_bo(adev);
2824 amdgpu_free_static_csa(&adev->virt.csa_obj);
2825 amdgpu_device_wb_fini(adev);
2826 amdgpu_device_mem_scratch_fini(adev);
2827 amdgpu_ib_pool_fini(adev);
2828 }
2829
2830 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2831 /* XXX handle errors */
2832 if (r) {
2833 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2834 adev->ip_blocks[i].version->funcs->name, r);
2835 }
2836 adev->ip_blocks[i].status.sw = false;
2837 adev->ip_blocks[i].status.valid = false;
2838 }
2839
2840 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2841 if (!adev->ip_blocks[i].status.late_initialized)
2842 continue;
2843 if (adev->ip_blocks[i].version->funcs->late_fini)
2844 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2845 adev->ip_blocks[i].status.late_initialized = false;
2846 }
2847
2848 amdgpu_ras_fini(adev);
2849
2850 return 0;
2851 }
2852
2853 /**
2854 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2855 *
2856 * @work: work_struct.
2857 */
2858 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2859 {
2860 struct amdgpu_device *adev =
2861 container_of(work, struct amdgpu_device, delayed_init_work.work);
2862 int r;
2863
2864 r = amdgpu_ib_ring_tests(adev);
2865 if (r)
2866 DRM_ERROR("ib ring test failed (%d).\n", r);
2867 }
2868
2869 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2870 {
2871 struct amdgpu_device *adev =
2872 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2873
2874 WARN_ON_ONCE(adev->gfx.gfx_off_state);
2875 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2876
2877 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2878 adev->gfx.gfx_off_state = true;
2879 }
2880
2881 /**
2882 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2883 *
2884 * @adev: amdgpu_device pointer
2885 *
2886 * Main suspend function for hardware IPs. The list of all the hardware
2887 * IPs that make up the asic is walked, clockgating is disabled and the
2888 * suspend callbacks are run. suspend puts the hardware and software state
2889 * in each IP into a state suitable for suspend.
2890 * Returns 0 on success, negative error code on failure.
2891 */
2892 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2893 {
2894 int i, r;
2895
2896 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2897 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2898
2899 /*
2900 * Per the PMFW team's suggestion, the driver needs to handle gfxoff
2901 * and df cstate feature disablement for gpu reset (e.g. Mode1Reset)
2902 * scenarios. Add the missing df cstate disablement here.
2903 */
2904 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2905 dev_warn(adev->dev, "Failed to disallow df cstate");
2906
2907 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2908 if (!adev->ip_blocks[i].status.valid)
2909 continue;
2910
2911 /* displays are handled separately */
2912 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2913 continue;
2914
2916 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2917 /* XXX handle errors */
2918 if (r) {
2919 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2920 adev->ip_blocks[i].version->funcs->name, r);
2921 return r;
2922 }
2923
2924 adev->ip_blocks[i].status.hw = false;
2925 }
2926
2927 return 0;
2928 }
2929
2930 /**
2931 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2932 *
2933 * @adev: amdgpu_device pointer
2934 *
2935 * Main suspend function for hardware IPs. The list of all the hardware
2936 * IPs that make up the asic is walked, clockgating is disabled and the
2937 * suspend callbacks are run. suspend puts the hardware and software state
2938 * in each IP into a state suitable for suspend.
2939 * Returns 0 on success, negative error code on failure.
2940 */
2941 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2942 {
2943 int i, r;
2944
2945 if (adev->in_s0ix)
2946 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2947
2948 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2949 if (!adev->ip_blocks[i].status.valid)
2950 continue;
2951 /* displays are handled in phase1 */
2952 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2953 continue;
2954 /* PSP lost connection when err_event_athub occurs */
2955 if (amdgpu_ras_intr_triggered() &&
2956 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2957 adev->ip_blocks[i].status.hw = false;
2958 continue;
2959 }
2960
2961 /* skip unnecessary suspend if we have not initialized them yet */
2962 if (adev->gmc.xgmi.pending_reset &&
2963 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2964 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2965 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2966 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2967 adev->ip_blocks[i].status.hw = false;
2968 continue;
2969 }
2970
2971 /* skip suspend of gfx/mes and psp for S0ix
2972 * gfx is in gfxoff state, so on resume it will exit gfxoff just
2973 * like at runtime. PSP is also part of the always on hardware
2974 * so no need to suspend it.
2975 */
2976 if (adev->in_s0ix &&
2977 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2978 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2979 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
2980 continue;
2981
2982 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
2983 if (adev->in_s0ix &&
2984 (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
2985 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2986 continue;
2987
2988 /* During cold boot, swPSP provides the IMU and RLC FW binaries to TOS.
2989 * These live in TMR and hence are expected to be reused by PSP-TOS to
2990 * reload from this location; RLC autoload also gets triggered from here
2991 * based on the PMFW -> PSP message during the re-init sequence.
2992 * Therefore, psp suspend & resume should be skipped to avoid destroying
2993 * the TMR and reloading FWs again for IMU-enabled APU ASICs.
2994 */
2995 if (amdgpu_in_reset(adev) &&
2996 (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
2997 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2998 continue;
2999
3001 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3002 /* XXX handle errors */
3003 if (r) {
3004 DRM_ERROR("suspend of IP block <%s> failed %d\n",
3005 adev->ip_blocks[i].version->funcs->name, r);
3006 }
3007 adev->ip_blocks[i].status.hw = false;
3008 /* handle putting the SMC in the appropriate state */
3009 if (!amdgpu_sriov_vf(adev)) {
3010 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3011 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3012 if (r) {
3013 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3014 adev->mp1_state, r);
3015 return r;
3016 }
3017 }
3018 }
3019 }
3020
3021 return 0;
3022 }
3023
3024 /**
3025 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3026 *
3027 * @adev: amdgpu_device pointer
3028 *
3029 * Main suspend function for hardware IPs. The list of all the hardware
3030 * IPs that make up the asic is walked, clockgating is disabled and the
3031 * suspend callbacks are run. suspend puts the hardware and software state
3032 * in each IP into a state suitable for suspend.
3033 * Returns 0 on success, negative error code on failure.
3034 */
3035 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3036 {
3037 int r;
3038
3039 if (amdgpu_sriov_vf(adev)) {
3040 amdgpu_virt_fini_data_exchange(adev);
3041 amdgpu_virt_request_full_gpu(adev, false);
3042 }
3043
3044 r = amdgpu_device_ip_suspend_phase1(adev);
3045 if (r)
3046 return r;
3047 r = amdgpu_device_ip_suspend_phase2(adev);
3048
3049 if (amdgpu_sriov_vf(adev))
3050 amdgpu_virt_release_full_gpu(adev, false);
3051
3052 return r;
3053 }
3054
3055 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3056 {
3057 int i, r;
3058
3059 static enum amd_ip_block_type ip_order[] = {
3060 AMD_IP_BLOCK_TYPE_COMMON,
3061 AMD_IP_BLOCK_TYPE_GMC,
3062 AMD_IP_BLOCK_TYPE_PSP,
3063 AMD_IP_BLOCK_TYPE_IH,
3064 };
3065
3066 for (i = 0; i < adev->num_ip_blocks; i++) {
3067 int j;
3068 struct amdgpu_ip_block *block;
3069
3070 block = &adev->ip_blocks[i];
3071 block->status.hw = false;
3072
3073 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3074
3075 if (block->version->type != ip_order[j] ||
3076 !block->status.valid)
3077 continue;
3078
3079 r = block->version->funcs->hw_init(adev);
3080 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3081 if (r)
3082 return r;
3083 block->status.hw = true;
3084 }
3085 }
3086
3087 return 0;
3088 }
3089
3090 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3091 {
3092 int i, r;
3093
3094 static enum amd_ip_block_type ip_order[] = {
3095 AMD_IP_BLOCK_TYPE_SMC,
3096 AMD_IP_BLOCK_TYPE_DCE,
3097 AMD_IP_BLOCK_TYPE_GFX,
3098 AMD_IP_BLOCK_TYPE_SDMA,
3099 AMD_IP_BLOCK_TYPE_MES,
3100 AMD_IP_BLOCK_TYPE_UVD,
3101 AMD_IP_BLOCK_TYPE_VCE,
3102 AMD_IP_BLOCK_TYPE_VCN,
3103 AMD_IP_BLOCK_TYPE_JPEG
3104 };
3105
3106 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3107 int j;
3108 struct amdgpu_ip_block *block;
3109
3110 for (j = 0; j < adev->num_ip_blocks; j++) {
3111 block = &adev->ip_blocks[j];
3112
3113 if (block->version->type != ip_order[i] ||
3114 !block->status.valid ||
3115 block->status.hw)
3116 continue;
3117
3118 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3119 r = block->version->funcs->resume(adev);
3120 else
3121 r = block->version->funcs->hw_init(adev);
3122
3123 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3124 if (r)
3125 return r;
3126 block->status.hw = true;
3127 }
3128 }
3129
3130 return 0;
3131 }
3132
3133 /**
3134 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3135 *
3136 * @adev: amdgpu_device pointer
3137 *
3138 * First resume function for hardware IPs. The list of all the hardware
3139 * IPs that make up the asic is walked and the resume callbacks are run for
3140 * COMMON, GMC, and IH. resume puts the hardware into a functional state
3141 * after a suspend and updates the software state as necessary. This
3142 * function is also used for restoring the GPU after a GPU reset.
3143 * Returns 0 on success, negative error code on failure.
3144 */
3145 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3146 {
3147 int i, r;
3148
3149 for (i = 0; i < adev->num_ip_blocks; i++) {
3150 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3151 continue;
3152 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3153 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3154 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3155 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3156
3157 r = adev->ip_blocks[i].version->funcs->resume(adev);
3158 if (r) {
3159 DRM_ERROR("resume of IP block <%s> failed %d\n",
3160 adev->ip_blocks[i].version->funcs->name, r);
3161 return r;
3162 }
3163 adev->ip_blocks[i].status.hw = true;
3164 }
3165 }
3166
3167 return 0;
3168 }
3169
3170 /**
3171 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3172 *
3173 * @adev: amdgpu_device pointer
3174 *
3175 * Second resume function for hardware IPs. The list of all the hardware
3176 * IPs that make up the asic is walked and the resume callbacks are run for
3177 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
3178 * functional state after a suspend and updates the software state as
3179 * necessary. This function is also used for restoring the GPU after a GPU
3180 * reset.
3181 * Returns 0 on success, negative error code on failure.
3182 */
3183 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3184 {
3185 int i, r;
3186
3187 for (i = 0; i < adev->num_ip_blocks; i++) {
3188 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3189 continue;
3190 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3191 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3192 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3193 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3194 continue;
3195 r = adev->ip_blocks[i].version->funcs->resume(adev);
3196 if (r) {
3197 DRM_ERROR("resume of IP block <%s> failed %d\n",
3198 adev->ip_blocks[i].version->funcs->name, r);
3199 return r;
3200 }
3201 adev->ip_blocks[i].status.hw = true;
3202 }
3203
3204 return 0;
3205 }
3206
3207 /**
3208 * amdgpu_device_ip_resume - run resume for hardware IPs
3209 *
3210 * @adev: amdgpu_device pointer
3211 *
3212 * Main resume function for hardware IPs. The hardware IPs
3213 * are split into two resume functions because they are
3214 * also used in recovering from a GPU reset and some additional
3215 * steps need to be taken between them. In this case (S3/S4) they are
3216 * run sequentially.
3217 * Returns 0 on success, negative error code on failure.
3218 */
3219 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3220 {
3221 int r;
3222
3223 r = amdgpu_device_ip_resume_phase1(adev);
3224 if (r)
3225 return r;
3226
3227 r = amdgpu_device_fw_loading(adev);
3228 if (r)
3229 return r;
3230
3231 r = amdgpu_device_ip_resume_phase2(adev);
3232
3233 return r;
3234 }
3235
3236 /**
3237 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3238 *
3239 * @adev: amdgpu_device pointer
3240 *
3241 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3242 */
3243 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3244 {
3245 if (amdgpu_sriov_vf(adev)) {
3246 if (adev->is_atom_fw) {
3247 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3248 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3249 } else {
3250 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3251 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3252 }
3253
3254 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3255 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3256 }
3257 }
3258
3259 /**
3260 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3261 *
3262 * @asic_type: AMD asic type
3263 *
3264 * Check if there is DC (new modesetting infrastructure) support for an asic.
3265 * returns true if DC has support, false if not.
3266 */
3267 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3268 {
3269 switch (asic_type) {
3270 #ifdef CONFIG_DRM_AMDGPU_SI
3271 case CHIP_HAINAN:
3272 #endif
3273 case CHIP_TOPAZ:
3274 /* chips with no display hardware */
3275 return false;
3276 #if defined(CONFIG_DRM_AMD_DC)
3277 case CHIP_TAHITI:
3278 case CHIP_PITCAIRN:
3279 case CHIP_VERDE:
3280 case CHIP_OLAND:
3281 /*
3282 * We have systems in the wild with these ASICs that require
3283 * LVDS and VGA support which is not supported with DC.
3284 *
3285 * Fall back to the non-DC driver here by default so as not to
3286 * cause regressions.
3287 */
3288 #if defined(CONFIG_DRM_AMD_DC_SI)
3289 return amdgpu_dc > 0;
3290 #else
3291 return false;
3292 #endif
3293 case CHIP_BONAIRE:
3294 case CHIP_KAVERI:
3295 case CHIP_KABINI:
3296 case CHIP_MULLINS:
3297 /*
3298 * We have systems in the wild with these ASICs that require
3299 * VGA support which is not supported with DC.
3300 *
3301 * Fall back to the non-DC driver here by default so as not to
3302 * cause regressions.
3303 */
3304 return amdgpu_dc > 0;
3305 default:
3306 return amdgpu_dc != 0;
3307 #else
3308 default:
3309 if (amdgpu_dc > 0)
3310 DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
3311 return false;
3312 #endif
3313 }
3314 }
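
/*
 * Illustrative: the amdgpu_dc checks above are driven by the amdgpu.dc
 * module parameter; e.g.
 *
 *	amdgpu.dc=1
 *
 * opts the legacy SI/CIK ASICs listed above into DC, while amdgpu.dc=0
 * forces the non-DC path where one exists.
 */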
3315
3316 /**
3317 * amdgpu_device_has_dc_support - check if dc is supported
3318 *
3319 * @adev: amdgpu_device pointer
3320 *
3321 * Returns true for supported, false for not supported
3322 */
3323 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3324 {
3325 if (adev->enable_virtual_display ||
3326 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3327 return false;
3328
3329 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3330 }
3331
3332 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3333 {
3334 struct amdgpu_device *adev =
3335 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3336 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3337
3338 /* It's a bug to not have a hive within this function */
3339 if (WARN_ON(!hive))
3340 return;
3341
3342 /*
3343 * Use task barrier to synchronize all xgmi reset works across the
3344 * hive. task_barrier_enter and task_barrier_exit will block
3345 * until all the threads running the xgmi reset works reach
3346 * those points. task_barrier_full will do both blocks.
3347 */
3348 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3349
3350 task_barrier_enter(&hive->tb);
3351 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3352
3353 if (adev->asic_reset_res)
3354 goto fail;
3355
3356 task_barrier_exit(&hive->tb);
3357 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3358
3359 if (adev->asic_reset_res)
3360 goto fail;
3361
3362 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3363 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3364 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3365 } else {
3366
3367 task_barrier_full(&hive->tb);
3368 adev->asic_reset_res = amdgpu_asic_reset(adev);
3369 }
3370
3371 fail:
3372 if (adev->asic_reset_res)
3373 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3374 adev->asic_reset_res, adev_to_drm(adev)->unique);
3375 amdgpu_put_xgmi_hive(hive);
3376 }
3377
3378 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3379 {
3380 char *input = amdgpu_lockup_timeout;
3381 char *timeout_setting = NULL;
3382 int index = 0;
3383 long timeout;
3384 int ret = 0;
3385
3386 /*
3387 * By default the timeout for non-compute jobs is 10000 ms
3388 * and 60000 ms for compute jobs.
3389 * In SR-IOV or passthrough mode, the timeout for compute
3390 * jobs is 60000 ms by default.
3391 */
3392 adev->gfx_timeout = msecs_to_jiffies(10000);
3393 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3394 if (amdgpu_sriov_vf(adev))
3395 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3396 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3397 else
3398 adev->compute_timeout = msecs_to_jiffies(60000);
3399
3400 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3401 while ((timeout_setting = strsep(&input, ",")) &&
3402 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3403 ret = kstrtol(timeout_setting, 0, &timeout);
3404 if (ret)
3405 return ret;
3406
3407 if (timeout == 0) {
3408 index++;
3409 continue;
3410 } else if (timeout < 0) {
3411 timeout = MAX_SCHEDULE_TIMEOUT;
3412 dev_warn(adev->dev, "lockup timeout disabled");
3413 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3414 } else {
3415 timeout = msecs_to_jiffies(timeout);
3416 }
3417
3418 switch (index++) {
3419 case 0:
3420 adev->gfx_timeout = timeout;
3421 break;
3422 case 1:
3423 adev->compute_timeout = timeout;
3424 break;
3425 case 2:
3426 adev->sdma_timeout = timeout;
3427 break;
3428 case 3:
3429 adev->video_timeout = timeout;
3430 break;
3431 default:
3432 break;
3433 }
3434 }
3435 /*
3436 * There is only one value specified and
3437 * it should apply to all non-compute jobs.
3438 */
3439 if (index == 1) {
3440 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3441 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3442 adev->compute_timeout = adev->gfx_timeout;
3443 }
3444 }
3445
3446 return ret;
3447 }
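
/*
 * Example, matching the parser above: amdgpu.lockup_timeout takes up to
 * four comma separated values in ms, in the order gfx,compute,sdma,video;
 * 0 keeps the default and a negative value disables the timeout:
 *
 *	amdgpu.lockup_timeout=10000,60000,10000,10000
 */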
3448
3449 /**
3450 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3451 *
3452 * @adev: amdgpu_device pointer
3453 *
3454 * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3455 */
3456 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3457 {
3458 struct iommu_domain *domain;
3459
3460 domain = iommu_get_domain_for_dev(adev->dev);
3461 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3462 adev->ram_is_direct_mapped = true;
3463 }
3464
3465 static const struct attribute *amdgpu_dev_attributes[] = {
3466 &dev_attr_pcie_replay_count.attr,
3467 NULL
3468 };
3469
3470 static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3471 {
3472 if (amdgpu_mcbp == 1)
3473 adev->gfx.mcbp = true;
3474 else if (amdgpu_mcbp == 0)
3475 adev->gfx.mcbp = false;
3476
3477 if (amdgpu_sriov_vf(adev))
3478 adev->gfx.mcbp = true;
3479
3480 if (adev->gfx.mcbp)
3481 DRM_INFO("MCBP is enabled\n");
3482 }
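
/*
 * Illustrative: amdgpu_mcbp above is the amdgpu.mcbp module parameter
 * (1 = force mid-command-buffer preemption on, 0 = force it off, other
 * values keep the default); SR-IOV VFs force MCBP on regardless, as
 * implemented above.
 */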
3483
3484 /**
3485 * amdgpu_device_init - initialize the driver
3486 *
3487 * @adev: amdgpu_device pointer
3488 * @flags: driver flags
3489 *
3490 * Initializes the driver info and hw (all asics).
3491 * Returns 0 for success or an error on failure.
3492 * Called at driver startup.
3493 */
3494 int amdgpu_device_init(struct amdgpu_device *adev,
3495 uint32_t flags)
3496 {
3497 struct drm_device *ddev = adev_to_drm(adev);
3498 struct pci_dev *pdev = adev->pdev;
3499 int r, i;
3500 bool px = false;
3501 u32 max_MBps;
3502 int tmp;
3503
3504 adev->shutdown = false;
3505 adev->flags = flags;
3506
3507 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3508 adev->asic_type = amdgpu_force_asic_type;
3509 else
3510 adev->asic_type = flags & AMD_ASIC_MASK;
3511
3512 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3513 if (amdgpu_emu_mode == 1)
3514 adev->usec_timeout *= 10;
3515 adev->gmc.gart_size = 512 * 1024 * 1024;
3516 adev->accel_working = false;
3517 adev->num_rings = 0;
3518 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3519 adev->mman.buffer_funcs = NULL;
3520 adev->mman.buffer_funcs_ring = NULL;
3521 adev->vm_manager.vm_pte_funcs = NULL;
3522 adev->vm_manager.vm_pte_num_scheds = 0;
3523 adev->gmc.gmc_funcs = NULL;
3524 adev->harvest_ip_mask = 0x0;
3525 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3526 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3527
3528 adev->smc_rreg = &amdgpu_invalid_rreg;
3529 adev->smc_wreg = &amdgpu_invalid_wreg;
3530 adev->pcie_rreg = &amdgpu_invalid_rreg;
3531 adev->pcie_wreg = &amdgpu_invalid_wreg;
3532 adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
3533 adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
3534 adev->pciep_rreg = &amdgpu_invalid_rreg;
3535 adev->pciep_wreg = &amdgpu_invalid_wreg;
3536 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3537 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3538 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3539 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3540 adev->didt_rreg = &amdgpu_invalid_rreg;
3541 adev->didt_wreg = &amdgpu_invalid_wreg;
3542 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3543 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3544 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3545 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3546
	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization is all done here so we
	 * can recall these functions without having locking issues
	 */
	mutex_init(&adev->firmware.mutex);
	mutex_init(&adev->pm.mutex);
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->gfx.pipe_reserve_mutex);
	mutex_init(&adev->gfx.gfx_off_mutex);
	mutex_init(&adev->gfx.partition_mutex);
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	mutex_init(&adev->virt.vf_errors.lock);
	mutex_init(&adev->virt.rlcg_reg_lock);
	hash_init(adev->mn_hash);
	mutex_init(&adev->psp.mutex);
	mutex_init(&adev->notifier_lock);
	mutex_init(&adev->pm.stable_pstate_ctx_lock);
	mutex_init(&adev->benchmark_mutex);

	amdgpu_device_init_apu_flags(adev);

	r = amdgpu_device_check_arguments(adev);
	if (r)
		return r;

	spin_lock_init(&adev->mmio_idx_lock);
	spin_lock_init(&adev->smc_idx_lock);
	spin_lock_init(&adev->pcie_idx_lock);
	spin_lock_init(&adev->uvd_ctx_idx_lock);
	spin_lock_init(&adev->didt_idx_lock);
	spin_lock_init(&adev->gc_cac_idx_lock);
	spin_lock_init(&adev->se_cac_idx_lock);
	spin_lock_init(&adev->audio_endpt_idx_lock);
	spin_lock_init(&adev->mm_stats.lock);

	INIT_LIST_HEAD(&adev->shadow_list);
	mutex_init(&adev->shadow_list_lock);

	INIT_LIST_HEAD(&adev->reset_list);

	INIT_LIST_HEAD(&adev->ras_list);

	INIT_DELAYED_WORK(&adev->delayed_init_work,
			  amdgpu_device_delayed_init_work_handler);
	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
			  amdgpu_device_delay_enable_gfx_off);

	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);

	adev->gfx.gfx_off_req_count = 1;
	adev->gfx.gfx_off_residency = 0;
	adev->gfx.gfx_off_entrycount = 0;
	adev->pm.ac_power = power_supply_is_system_supplied() > 0;

	atomic_set(&adev->throttling_logging_enabled, 1);
	/*
	 * If throttling continues, logging will be performed every minute
	 * to avoid log flooding. "-1" is subtracted since the thermal
	 * throttling interrupt comes every second. Thus, the total logging
	 * interval is 59 seconds (ratelimited printk interval) + 1 second
	 * (waiting for the throttling interrupt) = 60 seconds.
	 */
	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	if (adev->asic_type >= CHIP_BONAIRE) {
		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	} else {
		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
	}

	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (!adev->rmmio)
		return -ENOMEM;

	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);

	/*
	 * The reset domain needs to be present early, before any XGMI hive
	 * is discovered (if any) and initialized, so that the reset semaphore
	 * and the in_gpu_reset flag can be used early during init and before
	 * the first call to RREG32.
	 */
	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
	if (!adev->reset_domain)
		return -ENOMEM;
3645
3646 /* detect hw virtualization here */
3647 amdgpu_detect_virtualization(adev);
3648
3649 amdgpu_device_get_pcie_info(adev);
3650
3651 r = amdgpu_device_get_job_timeout_settings(adev);
3652 if (r) {
3653 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3654 return r;
3655 }
3656
3657 /* early init functions */
3658 r = amdgpu_device_ip_early_init(adev);
3659 if (r)
3660 return r;
3661
3662 amdgpu_device_set_mcbp(adev);
3663
3664 /* Get rid of things like offb */
3665 r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
3666 if (r)
3667 return r;
3668
3669 /* Enable TMZ based on IP_VERSION */
3670 amdgpu_gmc_tmz_set(adev);
3671
3672 amdgpu_gmc_noretry_set(adev);
3673 /* Need to get xgmi info early to decide the reset behavior*/
3674 if (adev->gmc.xgmi.supported) {
3675 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3676 if (r)
3677 return r;
3678 }
3679
	/* enable PCIE atomic ops */
	if (amdgpu_sriov_vf(adev)) {
		if (adev->virt.fw_reserve.p_pf2vf)
			adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
						      adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
				(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	/* APUs with gfx9 onwards don't rely on PCIe atomics; an internal
	 * path natively supports atomics, so set have_atomics_support
	 * to true.
	 */
	} else if ((adev->flags & AMD_IS_APU) &&
		   (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))) {
		adev->have_atomics_support = true;
	} else {
		adev->have_atomics_support =
			!pci_enable_atomic_ops_to_root(adev->pdev,
						       PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
						       PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	}

	if (!adev->have_atomics_support)
		dev_info(adev->dev, "PCIE atomic ops are not supported\n");

	/* doorbell bar mapping and doorbell index init */
	amdgpu_doorbell_init(adev);

	if (amdgpu_emu_mode == 1) {
		/* post the asic on emulation mode */
		emu_soc_asic_init(adev);
		goto fence_driver_init;
	}

	amdgpu_reset_init(adev);

	/* detect if we are with an SRIOV vbios */
	if (adev->bios)
		amdgpu_device_detect_sriov_bios(adev);

	/* check if we need to reset the asic
	 * E.g., driver was not cleanly unloaded previously, etc.
	 */
	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
		if (adev->gmc.xgmi.num_physical_nodes) {
			dev_info(adev->dev, "Pending hive reset.\n");
			adev->gmc.xgmi.pending_reset = true;
			/* Only need to init necessary blocks for SMU to handle the reset */
			for (i = 0; i < adev->num_ip_blocks; i++) {
				if (!adev->ip_blocks[i].status.valid)
					continue;
				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
					DRM_DEBUG("IP %s disabled for hw_init.\n",
						  adev->ip_blocks[i].version->funcs->name);
					adev->ip_blocks[i].status.hw = true;
				}
			}
		} else {
			tmp = amdgpu_reset_method;
			/* It should do a default reset when loading or reloading the driver,
			 * regardless of the module parameter reset_method.
			 */
			amdgpu_reset_method = AMD_RESET_METHOD_NONE;
			r = amdgpu_asic_reset(adev);
			amdgpu_reset_method = tmp;
			if (r) {
				dev_err(adev->dev, "asic reset on init failed\n");
				goto failed;
			}
		}
	}

	/* Post card if necessary */
	if (amdgpu_device_need_post(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			r = -EINVAL;
			goto failed;
		}
		DRM_INFO("GPU posting now...\n");
		r = amdgpu_device_asic_init(adev);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			goto failed;
		}
	}

	if (adev->bios) {
		if (adev->is_atom_fw) {
			/* Initialize clocks */
			r = amdgpu_atomfirmware_get_clock_info(adev);
			if (r) {
				dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
				goto failed;
			}
		} else {
			/* Initialize clocks */
			r = amdgpu_atombios_get_clock_info(adev);
			if (r) {
				dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
				goto failed;
			}
			/* init i2c buses */
			if (!amdgpu_device_has_dc_support(adev))
				amdgpu_atombios_i2c_init(adev);
		}
	}

fence_driver_init:
	/* Fence driver */
	r = amdgpu_fence_driver_sw_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev_to_drm(adev));

	r = amdgpu_device_ip_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
		goto release_ras_con;
	}

	amdgpu_fence_driver_hw_init(adev);

	dev_info(adev->dev,
		 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
		 adev->gfx.config.max_shader_engines,
		 adev->gfx.config.max_sh_per_se,
		 adev->gfx.config.max_cu_per_sh,
		 adev->gfx.cu_info.number);

	adev->accel_working = true;

	amdgpu_vm_check_compute_bug(adev);

	/* Initialize the buffer migration limit. */
	if (amdgpu_moverate >= 0)
		max_MBps = amdgpu_moverate;
	else
		max_MBps = 8; /* Allow 8 MB/s. */
	/* Get a log2 for easy divisions. */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

	r = amdgpu_atombios_sysfs_init(adev);
	if (r)
		drm_err(&adev->ddev,
			"registering atombios sysfs failed (%d).\n", r);

	r = amdgpu_pm_sysfs_init(adev);
	if (r)
		DRM_ERROR("registering pm sysfs failed (%d).\n", r);

	r = amdgpu_ucode_sysfs_init(adev);
	if (r) {
		adev->ucode_sysfs_en = false;
		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
	} else
		adev->ucode_sysfs_en = true;

	/*
	 * Register the gpu instance before amdgpu_device_enable_mgpu_fan_boost.
	 * Otherwise the mgpu fan boost feature will be skipped because the
	 * gpu instance count would be too low.
	 */
	amdgpu_register_gpu_instance(adev);

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	if (!adev->gmc.xgmi.pending_reset) {
		r = amdgpu_device_ip_late_init(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
			goto release_ras_con;
		}
		/* must succeed. */
		amdgpu_ras_resume(adev);
		queue_delayed_work(system_wq, &adev->delayed_init_work,
				   msecs_to_jiffies(AMDGPU_RESUME_MS));
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_release_full_gpu(adev, true);
		flush_delayed_work(&adev->delayed_init_work);
	}

	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
	if (r)
		dev_err(adev->dev, "Could not create amdgpu device attr\n");

	amdgpu_fru_sysfs_init(adev);

	/* Braces added so the error check only sees amdgpu_pmu_init()'s
	 * return value rather than a stale r when PERF_EVENTS is disabled.
	 */
	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
		r = amdgpu_pmu_init(adev);
		if (r)
			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}

	/* Keep a cached copy of the PCI config space handy so it can be
	 * restored after a sudden PCI error.
	 */
	if (amdgpu_device_cache_pci_state(adev->pdev))
		pci_restore_state(pdev);

	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it
	 */
	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);

	px = amdgpu_device_supports_px(ddev);

	if (px || (!dev_is_removable(&adev->pdev->dev) &&
		   apple_gmux_detect(NULL, NULL)))
		vga_switcheroo_register_client(adev->pdev,
					       &amdgpu_switcheroo_ops, px);

	if (px)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

	if (adev->gmc.xgmi.pending_reset)
		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
				   msecs_to_jiffies(AMDGPU_RESUME_MS));

	amdgpu_device_check_iommu_direct_map(adev);

	return 0;

release_ras_con:
	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	/* failed in exclusive mode due to timeout */
	if (amdgpu_sriov_vf(adev) &&
	    !amdgpu_sriov_runtime(adev) &&
	    amdgpu_virt_mmio_blocked(adev) &&
	    !amdgpu_virt_wait_reset(adev)) {
		dev_err(adev->dev, "VF exclusive mode timeout\n");
		/* Don't send request since VF is inactive. */
		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
		adev->virt.ops = NULL;
		r = -EAGAIN;
	}
	amdgpu_release_ras_context(adev);

failed:
	amdgpu_vf_error_trans_all(adev);

	return r;
}

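/*
 * Editor's note: an illustrative, simplified sketch of how the PCI probe
 * path is expected to reach amdgpu_device_init(); the actual wiring lives
 * in amdgpu_drv.c / amdgpu_kms.c and differs in detail:
 *
 *	// inside a probe routine, after allocating the drm/amdgpu device
 *	adev->pdev = pdev;
 *	adev->dev = &pdev->dev;
 *	r = amdgpu_device_init(adev, ent->driver_data);
 *	if (r)
 *		goto err_free;
 */
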
static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
{

	/* Clear all CPU mappings pointing to this device */
	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);

	/* Unmap all mapped bars - Doorbell, registers and VRAM */
	amdgpu_doorbell_fini(adev);

	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	if (adev->mman.aper_base_kaddr)
		iounmap(adev->mman.aper_base_kaddr);
	adev->mman.aper_base_kaddr = NULL;

	/* Memory manager related */
	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
		arch_phys_wc_del(adev->gmc.vram_mtrr);
		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
	}
}

/**
 * amdgpu_device_fini_hw - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini_hw(struct amdgpu_device *adev)
{
	dev_info(adev->dev, "amdgpu: finishing device.\n");
	flush_delayed_work(&adev->delayed_init_work);
	adev->shutdown = true;

	/* make sure IB tests are finished before entering exclusive mode
	 * to avoid preempting the IB tests
	 */
	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_request_full_gpu(adev, false);
		amdgpu_virt_fini_data_exchange(adev);
	}

	/* disable all interrupts */
	amdgpu_irq_disable_all(adev);
	if (adev->mode_info.mode_config_initialized) {
		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
			drm_helper_force_disable_all(adev_to_drm(adev));
		else
			drm_atomic_helper_shutdown(adev_to_drm(adev));
	}
	amdgpu_fence_driver_hw_fini(adev);

	if (adev->mman.initialized)
		drain_workqueue(adev->mman.bdev.wq);

	if (adev->pm.sysfs_initialized)
		amdgpu_pm_sysfs_fini(adev);
	if (adev->ucode_sysfs_en)
		amdgpu_ucode_sysfs_fini(adev);
	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
	amdgpu_fru_sysfs_fini(adev);

	/* RAS features must be disabled before hw fini */
	amdgpu_ras_pre_fini(adev);

	amdgpu_device_ip_fini_early(adev);

	amdgpu_irq_fini_hw(adev);

	if (adev->mman.initialized)
		ttm_device_clear_dma_mappings(&adev->mman.bdev);

	amdgpu_gart_dummy_page_fini(adev);

	if (drm_dev_is_unplugged(adev_to_drm(adev)))
		amdgpu_device_unmap_mmio(adev);

}

void amdgpu_device_fini_sw(struct amdgpu_device *adev)
{
	int idx;
	bool px;

	amdgpu_fence_driver_sw_fini(adev);
	amdgpu_device_ip_fini(adev);
	amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
	adev->accel_working = false;
	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));

	amdgpu_reset_fini(adev);

	/* free i2c buses */
	if (!amdgpu_device_has_dc_support(adev))
		amdgpu_i2c_fini(adev);

	if (amdgpu_emu_mode != 1)
		amdgpu_atombios_fini(adev);

	kfree(adev->bios);
	adev->bios = NULL;

	px = amdgpu_device_supports_px(adev_to_drm(adev));

	if (px || (!dev_is_removable(&adev->pdev->dev) &&
		   apple_gmux_detect(NULL, NULL)))
		vga_switcheroo_unregister_client(adev->pdev);

	if (px)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);

	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
		vga_client_unregister(adev->pdev);

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {

		iounmap(adev->rmmio);
		adev->rmmio = NULL;
		amdgpu_doorbell_fini(adev);
		drm_dev_exit(idx);
	}

	if (IS_ENABLED(CONFIG_PERF_EVENTS))
		amdgpu_pmu_fini(adev);
	if (adev->mman.discovery_bin)
		amdgpu_discovery_fini(adev);

	amdgpu_reset_put_reset_domain(adev->reset_domain);
	adev->reset_domain = NULL;

	kfree(adev->pci_state);

}

/**
 * amdgpu_device_evict_resources - evict device resources
 * @adev: amdgpu device object
 *
 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
 * of the vram memory type. Mainly used for evicting device resources
 * at suspend time.
 *
 */
static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
{
	int ret;

	/* No need to evict vram on APUs for suspend to ram or s2idle */
	if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
		return 0;

	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
	if (ret)
		DRM_WARN("evicting device resources failed\n");
	return ret;
}

/*
 * Suspend & resume.
 */
/**
 * amdgpu_device_prepare - prepare for device suspend
 *
 * @dev: drm dev pointer
 *
 * Prepare to put the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_prepare(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i, r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* Evict the majority of BOs before starting suspend sequence */
	r = amdgpu_device_evict_resources(adev);
	if (r)
		return r;

	flush_delayed_work(&adev->gfx.gfx_off_delay_work);

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
			continue;
		r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
		if (r)
			return r;
	}

	return 0;
}

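/*
 * Editor's note: an illustrative sketch (not the actual amdgpu_drv.c
 * wiring) of how the PM core is expected to drive the prepare/suspend
 * entry points in this section; all names here are hypothetical:
 *
 *	static int example_pmops_prepare(struct device *dev)
 *	{
 *		// drvdata on the PCI device is the drm_device
 *		return amdgpu_device_prepare(dev_get_drvdata(dev));
 *	}
 *
 *	static int example_pmops_suspend(struct device *dev)
 *	{
 *		return amdgpu_device_suspend(dev_get_drvdata(dev), true);
 *	}
 *
 *	static const struct dev_pm_ops example_pm_ops = {
 *		.prepare = example_pmops_prepare,
 *		.suspend = example_pmops_suspend,
 *	};
 */
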
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @fbcon: notify the fbdev of suspend
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r = 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	adev->in_suspend = true;

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_fini_data_exchange(adev);
		r = amdgpu_virt_request_full_gpu(adev, false);
		if (r)
			return r;
	}

	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
		DRM_WARN("smart shift update failed\n");

	if (fbcon)
		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);

	cancel_delayed_work_sync(&adev->delayed_init_work);

	amdgpu_ras_suspend(adev);

	amdgpu_device_ip_suspend_phase1(adev);

	if (!adev->in_s0ix)
		amdgpu_amdkfd_suspend(adev, adev->in_runpm);

	r = amdgpu_device_evict_resources(adev);
	if (r)
		return r;

	amdgpu_fence_driver_hw_fini(adev);

	amdgpu_device_ip_suspend_phase2(adev);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return 0;
}

/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @fbcon: notify the fbdev of resume
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r = 0;

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (adev->in_s0ix)
		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);

	/* post card */
	if (amdgpu_device_need_post(adev)) {
		r = amdgpu_device_asic_init(adev);
		if (r)
			dev_err(adev->dev, "amdgpu asic init failed\n");
	}

	r = amdgpu_device_ip_resume(adev);

	if (r) {
		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
		goto exit;
	}
	amdgpu_fence_driver_hw_init(adev);

	r = amdgpu_device_ip_late_init(adev);
	if (r)
		goto exit;

	queue_delayed_work(system_wq, &adev->delayed_init_work,
			   msecs_to_jiffies(AMDGPU_RESUME_MS));

	if (!adev->in_s0ix) {
		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
		if (r)
			goto exit;
	}

exit:
	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_data_exchange(adev);
		amdgpu_virt_release_full_gpu(adev, true);
	}

	if (r)
		return r;

	/* Make sure IB tests flushed */
	flush_delayed_work(&adev->delayed_init_work);

	if (fbcon)
		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);

	amdgpu_ras_resume(adev);

	if (adev->mode_info.num_crtc) {
		/*
		 * Most of the connector probing functions try to acquire runtime pm
		 * refs to ensure that the GPU is powered on when connector polling is
		 * performed. Since we're calling this from a runtime PM callback,
		 * trying to acquire rpm refs will cause us to deadlock.
		 *
		 * Since we're guaranteed to be holding the rpm lock, it's safe to
		 * temporarily disable the rpm helpers so this doesn't deadlock us.
		 */
#ifdef CONFIG_PM
		dev->dev->power.disable_depth++;
#endif
		if (!adev->dc_enabled)
			drm_helper_hpd_irq_event(dev);
		else
			drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
		dev->dev->power.disable_depth--;
#endif
	}
	adev->in_suspend = false;

	if (adev->enable_mes)
		amdgpu_mes_self_test(adev);

	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
		DRM_WARN("smart shift update failed\n");

	return 0;
}

/**
 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and
 * the check_soft_reset callbacks are run. check_soft_reset determines
 * if the asic is still hung or not.
 * Returns true if any of the IPs are still in a hung state, false if not.
 */
static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	if (amdgpu_sriov_vf(adev))
		return true;

	if (amdgpu_asic_need_full_reset(adev))
		return true;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
		if (adev->ip_blocks[i].status.hang) {
			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
		}
	}
	return asic_hang;
}

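/*
 * Editor's note: a minimal, hypothetical sketch of the kind of
 * check_soft_reset callback invoked above. A real implementation reads
 * the IP block's status registers; the register and mask names below
 * are made up purely for illustration:
 *
 *	static bool example_ip_check_soft_reset(void *handle)
 *	{
 *		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 *
 *		// Report "hung" while the engine-busy bit is still set.
 *		return !!(RREG32(mmEXAMPLE_IP_STATUS) & EXAMPLE_IP_BUSY_MASK);
 *	}
 */
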
/**
 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
 * handles any IP specific hardware or software state changes that are
 * necessary for a soft reset to succeed.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
 *
 * @adev: amdgpu_device pointer
 *
 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
 * reset is necessary to recover.
 * Returns true if a full asic reset is required, false if not.
 */
static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
{
	int i;

	if (amdgpu_asic_need_full_reset(adev))
		return true;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
			if (adev->ip_blocks[i].status.hang) {
				dev_info(adev->dev, "Some blocks need full reset!\n");
				return true;
			}
		}
	}
	return false;
}

/**
 * amdgpu_device_ip_soft_reset - do a soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * soft_reset callbacks are run if the block is hung. soft_reset handles any
 * IP specific hardware or software state changes that are necessary to soft
 * reset the IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
 * handles any IP specific hardware or software state changes that are
 * necessary after the IP has been soft reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->post_soft_reset)
			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
		if (r)
			return r;
	}

	return 0;
}

/**
 * amdgpu_device_recover_vram - Recover some VRAM contents
 *
 * @adev: amdgpu_device pointer
 *
 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
 * restore things like GPUVM page tables after a GPU reset where
 * the contents of VRAM might be lost.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
{
	struct dma_fence *fence = NULL, *next = NULL;
	struct amdgpu_bo *shadow;
	struct amdgpu_bo_vm *vmbo;
	long r = 1, tmo;

	if (amdgpu_sriov_runtime(adev))
		tmo = msecs_to_jiffies(8000);
	else
		tmo = msecs_to_jiffies(100);

	dev_info(adev->dev, "recover vram bo from shadow start\n");
	mutex_lock(&adev->shadow_list_lock);
	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
		/* If vm is compute context or adev is APU, shadow will be NULL */
		if (!vmbo->shadow)
			continue;
		shadow = vmbo->shadow;

		/* No need to recover an evicted BO */
		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
			continue;

		r = amdgpu_bo_restore_shadow(shadow, &next);
		if (r)
			break;

		if (fence) {
			tmo = dma_fence_wait_timeout(fence, false, tmo);
			dma_fence_put(fence);
			fence = next;
			if (tmo == 0) {
				r = -ETIMEDOUT;
				break;
			} else if (tmo < 0) {
				r = tmo;
				break;
			}
		} else {
			fence = next;
		}
	}
	mutex_unlock(&adev->shadow_list_lock);

	if (fence)
		tmo = dma_fence_wait_timeout(fence, false, tmo);
	dma_fence_put(fence);

	if (r < 0 || tmo <= 0) {
		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
		return -EIO;
	}

	dev_info(adev->dev, "recover vram bo from shadow done\n");
	return 0;
}


/**
 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
 *
 * @adev: amdgpu_device pointer
 * @from_hypervisor: request from hypervisor
 *
 * Do a VF FLR and reinitialize the ASIC.
 * Returns 0 on success, an error code otherwise.
 */
static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
				     bool from_hypervisor)
{
	int r;
	struct amdgpu_hive_info *hive = NULL;
	int retry_limit = 0;

retry:
	amdgpu_amdkfd_pre_reset(adev);

	if (from_hypervisor)
		r = amdgpu_virt_request_full_gpu(adev, true);
	else
		r = amdgpu_virt_reset_gpu(adev);
	if (r)
		return r;
	amdgpu_irq_gpu_reset_resume_helper(adev);

	/* some sw clean up VF needs to do before recover */
	amdgpu_virt_post_reset(adev);

	/* Resume IP prior to SMC */
	r = amdgpu_device_ip_reinit_early_sriov(adev);
	if (r)
		goto error;

	amdgpu_virt_init_data_exchange(adev);

	r = amdgpu_device_fw_loading(adev);
	if (r)
		return r;

	/* now we are okay to resume SMC/CP/SDMA */
	r = amdgpu_device_ip_reinit_late_sriov(adev);
	if (r)
		goto error;

	hive = amdgpu_get_xgmi_hive(adev);
	/* Update PSP FW topology after reset */
	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
		r = amdgpu_xgmi_update_topology(hive, adev);

	if (hive)
		amdgpu_put_xgmi_hive(hive);

	if (!r) {
		r = amdgpu_ib_ring_tests(adev);

		amdgpu_amdkfd_post_reset(adev);
	}

error:
	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
		amdgpu_inc_vram_lost(adev);
		r = amdgpu_device_recover_vram(adev);
	}
	amdgpu_virt_release_full_gpu(adev, true);

	if (AMDGPU_RETRY_SRIOV_RESET(r)) {
		if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
			retry_limit++;
			goto retry;
		} else
			DRM_ERROR("GPU reset retry is beyond the retry limit\n");
	}

	return r;
}

/**
 * amdgpu_device_has_job_running - check if there is any job in mirror list
 *
 * @adev: amdgpu_device pointer
 *
 * check if there is any job in mirror list
 */
bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
{
	int i;
	struct drm_sched_job *job;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		spin_lock(&ring->sched.job_list_lock);
		job = list_first_entry_or_null(&ring->sched.pending_list,
					       struct drm_sched_job, list);
		spin_unlock(&ring->sched.job_list_lock);
		if (job)
			return true;
	}
	return false;
}

/**
 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
 *
 * @adev: amdgpu_device pointer
 *
 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
 * a hung GPU.
 */
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
{

	if (amdgpu_gpu_recovery == 0)
		goto disabled;

	/* Skip soft reset check in fatal error mode */
	if (!amdgpu_ras_is_poison_mode_supported(adev))
		return true;

	if (amdgpu_sriov_vf(adev))
		return true;

	if (amdgpu_gpu_recovery == -1) {
		switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
		case CHIP_VERDE:
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_OLAND:
		case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
		case CHIP_KAVERI:
		case CHIP_KABINI:
		case CHIP_MULLINS:
#endif
		case CHIP_CARRIZO:
		case CHIP_STONEY:
		case CHIP_CYAN_SKILLFISH:
			goto disabled;
		default:
			break;
		}
	}

	return true;

disabled:
	dev_info(adev->dev, "GPU recovery disabled.\n");
	return false;
}

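/*
 * Editor's note (illustrative): restating the logic above, the
 * amdgpu_gpu_recovery module parameter is a tri-state: 0 disables GPU
 * recovery, a positive value enables it unconditionally, and -1 (auto)
 * enables it except on the older ASICs listed in the switch statement.
 */
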
int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* Cache the state before bus master disable. The saved config space
	 * values are used in other cases like restore after mode-2 reset.
	 */
	amdgpu_device_cache_pci_state(adev->pdev);

	/* disable BM */
	pci_clear_master(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		goto mode1_reset_failed;

	amdgpu_device_load_pci_state(adev->pdev);
	ret = amdgpu_psp_wait_for_bootloader(adev);
	if (ret)
		goto mode1_reset_failed;

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		ret = -ETIMEDOUT;
		goto mode1_reset_failed;
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return 0;

mode1_reset_failed:
	dev_err(adev->dev, "GPU mode1 reset failed\n");
	return ret;
}

int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
				 struct amdgpu_reset_context *reset_context)
{
	int i, r = 0;
	struct amdgpu_job *job = NULL;
	bool need_full_reset =
		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);

	if (reset_context->reset_req_dev == adev)
		job = reset_context->job;

	if (amdgpu_sriov_vf(adev)) {
		/* stop the data exchange thread */
		amdgpu_virt_fini_data_exchange(adev);
	}

	amdgpu_fence_driver_isr_toggle(adev, true);

	/* block all schedulers and reset given job's ring */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		/* Clear job fence from fence drv to avoid force_completion
		 * leave NULL and vm flush fence in fence drv
		 */
		amdgpu_fence_driver_clear_job_fences(ring);

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion(ring);
	}

	amdgpu_fence_driver_isr_toggle(adev, false);

	if (job && job->vm)
		drm_sched_increase_karma(&job->base);

	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
	/* If reset handler not implemented, continue; otherwise return */
	if (r == -EOPNOTSUPP)
		r = 0;
	else
		return r;

	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
	if (!amdgpu_sriov_vf(adev)) {

		if (!need_full_reset)
			need_full_reset = amdgpu_device_ip_need_full_reset(adev);

		if (!need_full_reset && amdgpu_gpu_recovery &&
		    amdgpu_device_ip_check_soft_reset(adev)) {
			amdgpu_device_ip_pre_soft_reset(adev);
			r = amdgpu_device_ip_soft_reset(adev);
			amdgpu_device_ip_post_soft_reset(adev);
			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
				need_full_reset = true;
			}
		}

		if (need_full_reset)
			r = amdgpu_device_ip_suspend(adev);
		if (need_full_reset)
			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
		else
			clear_bit(AMDGPU_NEED_FULL_RESET,
				  &reset_context->flags);
	}

	return r;
}

static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
{
	int i;

	lockdep_assert_held(&adev->reset_domain->sem);

	for (i = 0; i < adev->num_regs; i++) {
		adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
		trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
					     adev->reset_dump_reg_value[i]);
	}

	return 0;
}

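/*
 * Editor's note (illustrative): the register list dumped above is
 * typically populated from user space via the driver's debugfs
 * interface; the exact path can vary by kernel version, e.g. something
 * like:
 *
 *   echo "0x1a678 0x1a680" > /sys/kernel/debug/dri/0/amdgpu_reset_dump_register_list
 */
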
#ifdef CONFIG_DEV_COREDUMP
static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
				       size_t count, void *data, size_t datalen)
{
	struct drm_printer p;
	struct amdgpu_device *adev = data;
	struct drm_print_iterator iter;
	int i;

	iter.data = buffer;
	iter.offset = 0;
	iter.start = offset;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
	if (adev->reset_task_info.pid)
		drm_printf(&p, "process_name: %s PID: %d\n",
			   adev->reset_task_info.process_name,
			   adev->reset_task_info.pid);

	if (adev->reset_vram_lost)
		drm_printf(&p, "VRAM is lost due to GPU reset!\n");
	if (adev->num_regs) {
		drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");

		for (i = 0; i < adev->num_regs; i++)
			drm_printf(&p, "0x%08x: 0x%08x\n",
				   adev->reset_dump_reg_list[i],
				   adev->reset_dump_reg_value[i]);
	}

	return count - iter.remain;
}

static void amdgpu_devcoredump_free(void *data)
{
}

static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);

	ktime_get_ts64(&adev->reset_time);
	dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_NOWAIT,
		      amdgpu_devcoredump_read, amdgpu_devcoredump_free);
}
#endif

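/*
 * Editor's note (illustrative): once captured via dev_coredumpm() above,
 * the dump can be retrieved from user space through the devcoredump
 * class device, e.g.:
 *
 *   cat /sys/class/devcoredump/devcd<N>/data
 *
 * Writing anything to that file (or the devcoredump timeout) releases
 * the dump.
 */
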
int amdgpu_do_asic_reset(struct list_head *device_list_handle,
			 struct amdgpu_reset_context *reset_context)
{
	struct amdgpu_device *tmp_adev = NULL;
	bool need_full_reset, skip_hw_reset, vram_lost = false;
	int r = 0;
	bool gpu_reset_for_dev_remove = 0;

	/* Try reset handler method first */
	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
				    reset_list);
	amdgpu_reset_reg_dumps(tmp_adev);

	reset_context->reset_device_list = device_list_handle;
	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
	/* If reset handler not implemented, continue; otherwise return */
	if (r == -EOPNOTSUPP)
		r = 0;
	else
		return r;

	/* Reset handler not implemented, use the default method */
	need_full_reset =
		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);

	gpu_reset_for_dev_remove =
		test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
			test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);

	/*
	 * ASIC reset has to be done on all XGMI hive nodes ASAP
	 * to allow proper links negotiation in FW (within 1 sec)
	 */
	if (!skip_hw_reset && need_full_reset) {
		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
			/* For XGMI run all resets in parallel to speed up the process */
			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
				tmp_adev->gmc.xgmi.pending_reset = false;
				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
					r = -EALREADY;
			} else
				r = amdgpu_asic_reset(tmp_adev);

			if (r) {
				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
					r, adev_to_drm(tmp_adev)->unique);
				break;
			}
		}

		/* For XGMI wait for all resets to complete before proceed */
		if (!r) {
			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
					flush_work(&tmp_adev->xgmi_reset_work);
					r = tmp_adev->asic_reset_res;
					if (r)
						break;
				}
			}
		}
	}

	if (!r && amdgpu_ras_intr_triggered()) {
		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
			if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
			    tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
				tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
		}

		amdgpu_ras_intr_cleared();
	}

	/* Since the mode1 reset affects base ip blocks, the
	 * phase1 ip blocks need to be resumed. Otherwise there
	 * will be a BIOS signature error and the psp bootloader
	 * can't load kdb on the next amdgpu install.
	 */
	if (gpu_reset_for_dev_remove) {
		list_for_each_entry(tmp_adev, device_list_handle, reset_list)
			amdgpu_device_ip_resume_phase1(tmp_adev);

		goto end;
	}

	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		if (need_full_reset) {
			/* post card */
			r = amdgpu_device_asic_init(tmp_adev);
			if (r) {
				dev_warn(tmp_adev->dev, "asic atom init failed!");
			} else {
				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");

				r = amdgpu_device_ip_resume_phase1(tmp_adev);
				if (r)
					goto out;

				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
#ifdef CONFIG_DEV_COREDUMP
				tmp_adev->reset_vram_lost = vram_lost;
				memset(&tmp_adev->reset_task_info, 0,
				       sizeof(tmp_adev->reset_task_info));
				if (reset_context->job && reset_context->job->vm)
					tmp_adev->reset_task_info =
						reset_context->job->vm->task_info;
				amdgpu_reset_capture_coredumpm(tmp_adev);
#endif
				if (vram_lost) {
					DRM_INFO("VRAM is lost due to GPU reset!\n");
					amdgpu_inc_vram_lost(tmp_adev);
				}

				r = amdgpu_device_fw_loading(tmp_adev);
				if (r)
					return r;

				r = amdgpu_device_ip_resume_phase2(tmp_adev);
				if (r)
					goto out;

				if (vram_lost)
					amdgpu_device_fill_reset_magic(tmp_adev);

				/*
				 * Add this ASIC back to the tracked list since
				 * the reset has completed successfully.
				 */
				amdgpu_register_gpu_instance(tmp_adev);

				if (!reset_context->hive &&
				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
					amdgpu_xgmi_add_device(tmp_adev);

				r = amdgpu_device_ip_late_init(tmp_adev);
				if (r)
					goto out;

				drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);

				/*
				 * The GPU enters a bad state once the number of
				 * faulty pages flagged by ECC reaches the
				 * threshold, and RAS recovery is scheduled next.
				 * So check here and break recovery if the bad
				 * page threshold has indeed been exceeded,
				 * reminding the user to retire this GPU or set
				 * a bigger bad_page_threshold value when probing
				 * the driver again.
				 */
				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
					/* must succeed. */
					amdgpu_ras_resume(tmp_adev);
				} else {
					r = -EINVAL;
					goto out;
				}

				/* Update PSP FW topology after reset */
				if (reset_context->hive &&
				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
					r = amdgpu_xgmi_update_topology(
						reset_context->hive, tmp_adev);
			}
		}

out:
		if (!r) {
			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
			r = amdgpu_ib_ring_tests(tmp_adev);
			if (r) {
				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
				need_full_reset = true;
				r = -EAGAIN;
				goto end;
			}
		}

		if (!r)
			r = amdgpu_device_recover_vram(tmp_adev);
		else
			tmp_adev->asic_reset_res = r;
	}

end:
	if (need_full_reset)
		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
	else
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
	return r;
}

static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
{

	switch (amdgpu_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_MODE1:
		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
		break;
	case AMD_RESET_METHOD_MODE2:
		adev->mp1_state = PP_MP1_STATE_RESET;
		break;
	default:
		adev->mp1_state = PP_MP1_STATE_NONE;
		break;
	}
}

static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
{
	amdgpu_vf_error_trans_all(adev);
	adev->mp1_state = PP_MP1_STATE_NONE;
}

static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (p) {
		pm_runtime_enable(&(p->dev));
		pm_runtime_resume(&(p->dev));
	}

	pci_dev_put(p);
}

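/*
 * Editor's note (illustrative): pci_get_domain_bus_and_slot(..., 1)
 * above looks up PCI function 1 of the same device, which on AMD GPUs
 * is the HDMI/DP audio controller that shares the GPU's power domain.
 */
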
static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
{
	enum amd_reset_method reset_method;
	struct pci_dev *p = NULL;
	u64 expires;

	/*
	 * For now, only BACO and mode1 reset are confirmed
	 * to suffer from the audio issue if the audio device
	 * is not properly suspended.
	 */
	reset_method = amdgpu_asic_reset_method(adev);
	if ((reset_method != AMD_RESET_METHOD_BACO) &&
	    (reset_method != AMD_RESET_METHOD_MODE1))
		return -EINVAL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (!p)
		return -ENODEV;

	expires = pm_runtime_autosuspend_expiration(&(p->dev));
	if (!expires)
		/*
		 * If we cannot get the audio device autosuspend delay,
		 * a fixed 4s interval is used. The audio controller's
		 * default autosuspend delay is 3s, so the 4s used here
		 * is guaranteed to cover it.
		 */
		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;

	while (!pm_runtime_status_suspended(&(p->dev))) {
		if (!pm_runtime_suspend(&(p->dev)))
			break;

		if (expires < ktime_get_mono_fast_ns()) {
			dev_warn(adev->dev, "failed to suspend display audio\n");
			pci_dev_put(p);
			/* TODO: abort the succeeding gpu reset? */
			return -ETIMEDOUT;
		}
	}

	pm_runtime_disable(&(p->dev));

	pci_dev_put(p);
	return 0;
}

static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

#if defined(CONFIG_DEBUG_FS)
	if (!amdgpu_sriov_vf(adev))
		cancel_work(&adev->reset_work);
#endif

	if (adev->kfd.dev)
		cancel_work(&adev->kfd.reset_work);

	if (amdgpu_sriov_vf(adev))
		cancel_work(&adev->virt.flr_work);

	if (con && adev->ras_enabled)
		cancel_work(&con->recovery_work);

}

/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu_device pointer
 * @job: which job triggered the hang
 * @reset_context: amdgpu reset context pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Attempt to do a soft reset or a full reset and reinitialize the ASIC.
 * Returns 0 for success or an error on failure.
 */

int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job,
			      struct amdgpu_reset_context *reset_context)
{
	struct list_head device_list, *device_list_handle = NULL;
	bool job_signaled = false;
	struct amdgpu_hive_info *hive = NULL;
	struct amdgpu_device *tmp_adev = NULL;
	int i, r = 0;
	bool need_emergency_restart = false;
	bool audio_suspended = false;
	bool gpu_reset_for_dev_remove = false;

	gpu_reset_for_dev_remove =
		test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
			test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);

	/*
	 * Special case: RAS triggered and full reset isn't supported
	 */
	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);

	/*
	 * Flush RAM to disk so that after reboot
	 * the user can read the log and see why the system rebooted.
	 */
	if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
	    amdgpu_ras_get_context(adev)->reboot) {
		DRM_WARN("Emergency reboot.");

		ksys_sync_helper();
		emergency_restart();
	}

	dev_info(adev->dev, "GPU %s begin!\n",
		 need_emergency_restart ? "jobs stop" : "reset");

	if (!amdgpu_sriov_vf(adev))
		hive = amdgpu_get_xgmi_hive(adev);
	if (hive)
		mutex_lock(&hive->hive_lock);

	reset_context->job = job;
	reset_context->hive = hive;
	/*
	 * Build list of devices to reset.
	 * In case we are in XGMI hive mode, resort the device list
	 * to put adev in the 1st position.
	 */
	INIT_LIST_HEAD(&device_list);
	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			list_add_tail(&tmp_adev->reset_list, &device_list);
			if (gpu_reset_for_dev_remove && adev->shutdown)
				tmp_adev->shutdown = true;
		}
		if (!list_is_first(&adev->reset_list, &device_list))
			list_rotate_to_front(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	} else {
		list_add_tail(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	}

	/* We need to lock reset domain only once both for XGMI and single device */
	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
				    reset_list);
	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);

	/* block all schedulers and reset given job's ring */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {

		amdgpu_device_set_mp1_state(tmp_adev);

		/*
		 * Try to put the audio codec into suspend state
		 * before gpu reset started.
		 *
		 * The power domain of the graphics device is shared
		 * with the AZ power domain. Without this, we may
		 * change the audio hardware from behind the audio
		 * driver's back and trigger audio codec errors.
		 */
		if (!amdgpu_device_suspend_display_audio(tmp_adev))
			audio_suspended = true;

		amdgpu_ras_set_error_query_ready(tmp_adev, false);

		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);

		if (!amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_pre_reset(tmp_adev);

		/*
		 * Mark these ASICs to be reset as untracked first,
		 * and add them back after the reset completes.
		 */
		amdgpu_unregister_gpu_instance(tmp_adev);

		drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);

		/* disable ras on ALL IPs */
		if (!need_emergency_restart &&
		    amdgpu_device_ip_need_full_reset(tmp_adev))
			amdgpu_ras_suspend(tmp_adev);

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, job ? &job->base : NULL);

			if (need_emergency_restart)
				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
		}
		atomic_inc(&tmp_adev->gpu_reset_counter);
	}

	if (need_emergency_restart)
		goto skip_sched_resume;

	/*
	 * Must check guilty signal here since after this point all old
	 * HW fences are force signaled.
	 *
	 * job->base holds a reference to parent fence
	 */
	if (job && dma_fence_is_signaled(&job->hw_fence)) {
		job_signaled = true;
		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
		goto skip_hw_reset;
	}

retry:	/* Rest of adevs pre asic reset from XGMI hive. */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		if (gpu_reset_for_dev_remove) {
			/* Workaround for ASICs that need to disable SMC first */
			amdgpu_device_smu_fini_early(tmp_adev);
		}
		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
		/* TODO: Should we stop? */
		if (r) {
			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
				r, adev_to_drm(tmp_adev)->unique);
			tmp_adev->asic_reset_res = r;
		}

		/*
		 * Drop all pending non scheduler resets. Scheduler resets
		 * were already dropped during drm_sched_stop
		 */
		amdgpu_device_stop_pending_resets(tmp_adev);
	}

	/* Actual ASIC resets if needed. */
	/* Host driver will handle XGMI hive reset for SRIOV */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_device_reset_sriov(adev, !job);
		if (r)
			adev->asic_reset_res = r;

		/* Aldebaran and gfx_11_0_3 support ras in SRIOV, so RAS needs to be resumed during reset */
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
		    adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3))
			amdgpu_ras_resume(adev);
	} else {
		r = amdgpu_do_asic_reset(device_list_handle, reset_context);
		if (r == -EAGAIN)
			goto retry;

		if (!r && gpu_reset_for_dev_remove)
			goto recover_end;
	}

skip_hw_reset:

	/* Post ASIC reset for all devs. */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_start(&ring->sched, true);
		}

		if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
			amdgpu_mes_self_test(tmp_adev);

		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));

		if (tmp_adev->asic_reset_res)
			r = tmp_adev->asic_reset_res;

		tmp_adev->asic_reset_res = 0;

		if (r) {
			/* bad news, how do we tell it to userspace ? */
			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
		} else {
			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
				DRM_WARN("smart shift update failed\n");
		}
	}

skip_sched_resume:
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		/* unlock kfd: SRIOV would do it separately */
		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_post_reset(tmp_adev);

		/*
		 * kfd_post_reset will do nothing if the kfd device is not
		 * initialized, so bring up kfd here if it wasn't initialized
		 * before.
		 */
		if (!adev->kfd.init_complete)
			amdgpu_amdkfd_device_init(adev);

		if (audio_suspended)
			amdgpu_device_resume_display_audio(tmp_adev);

		amdgpu_device_unset_mp1_state(tmp_adev);

		amdgpu_ras_set_error_query_ready(tmp_adev, true);
	}

recover_end:
	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
				    reset_list);
	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);

	if (hive) {
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}

	if (r)
		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);

	atomic_set(&adev->reset_domain->reset_res, r);
	return r;
}

/**
 * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIe capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIe config space may not be available.
 */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	struct pci_dev *pdev;
	enum pci_bus_speed speed_cap, platform_speed_cap;
	enum pcie_link_width platform_link_width;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
		return;

	pcie_bandwidth_available(adev->pdev, NULL,
				 &platform_speed_cap, &platform_link_width);

	if (adev->pm.pcie_gen_mask == 0) {
		/* asic caps */
		pdev = adev->pdev;
		speed_cap = pcie_get_speed_cap(pdev);
		if (speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
		} else {
			if (speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
		/* platform caps */
		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
		} else {
			if (platform_speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
		} else {
			switch (platform_link_width) {
			case PCIE_LNK_X32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		}
	}
}

/**
 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
 *
 * @adev: amdgpu_device pointer
 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
 *
 * Return true if @peer_adev can access (DMA) @adev through the PCIe
 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
 * @peer_adev.
 */
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
				      struct amdgpu_device *peer_adev)
{
#ifdef CONFIG_HSA_AMD_P2P
	uint64_t address_mask = peer_adev->dev->dma_mask ?
		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
	resource_size_t aper_limit =
		adev->gmc.aper_base + adev->gmc.aper_size - 1;
	bool p2p_access =
		!adev->gmc.xgmi.connected_to_cpu &&
		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);

	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
		adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
		!(adev->gmc.aper_base & address_mask ||
		  aper_limit & address_mask));
#else
	return false;
#endif
}

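/**
 * amdgpu_device_baco_enter - enter BACO (Bus Active, Chip Off)
 *
 * @dev: drm_device pointer
 *
 * Disables the RAS doorbell interrupt if RAS is enabled, then asks the
 * DPM code to put the device into the BACO state. Returns 0 on success,
 * -ENOTSUPP if the device does not support BACO, or a negative error
 * code from the DPM call.
 */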
int amdgpu_device_baco_enter(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!amdgpu_device_supports_baco(dev))
		return -ENOTSUPP;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	return amdgpu_dpm_baco_enter(adev);
}

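/**
 * amdgpu_device_baco_exit - exit BACO (Bus Active, Chip Off)
 *
 * @dev: drm_device pointer
 *
 * Asks the DPM code to bring the device out of the BACO state, then
 * re-enables the RAS doorbell interrupt if RAS is enabled. Returns 0 on
 * success, -ENOTSUPP if the device does not support BACO, or a negative
 * error code from the DPM call.
 */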
int amdgpu_device_baco_exit(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	if (!amdgpu_device_supports_baco(dev))
		return -ENOTSUPP;

	ret = amdgpu_dpm_baco_exit(adev);
	if (ret)
		return ret;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	if (amdgpu_passthrough(adev) && adev->nbio.funcs &&
	    adev->nbio.funcs->clear_doorbell_interrupt)
		adev->nbio.funcs->clear_doorbell_interrupt(adev);

	return 0;
}

/**
 * amdgpu_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
 */
pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		DRM_WARN("No support for XGMI hive yet...");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	adev->pci_channel_state = state;

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	/* Fatal error, prepare for slot reset */
	case pci_channel_io_frozen:
		/*
		 * Locking adev->reset_domain->sem will prevent any external
		 * access to the GPU during PCI error recovery
		 */
		amdgpu_device_lock_reset_domain(adev->reset_domain);
		amdgpu_device_set_mp1_state(adev);

		/*
		 * Block any work scheduling as we do for a regular GPU reset
		 * for the duration of the recovery
		 */
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, NULL);
		}
		atomic_inc(&adev->gpu_reset_counter);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
 * @pdev: pointer to PCI device
 */
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{
	DRM_INFO("PCI error: mmio enabled callback!!\n");

	/* TODO - dump whatever for debugging purposes */

	/*
	 * This is called only if amdgpu_pci_error_detected returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
	 * works, no need to reset the slot.
	 */

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r, i;
	struct amdgpu_reset_context reset_context;
	u32 memsize;
	struct list_head device_list;

	DRM_INFO("PCI error: slot reset callback!!\n");

	memset(&reset_context, 0, sizeof(reset_context));

	INIT_LIST_HEAD(&device_list);
	list_add_tail(&adev->reset_list, &device_list);

	/* wait for asic to come out of reset */
	msleep(500);

	/* Restore PCI config space */
	amdgpu_device_load_pci_state(pdev);

	/* confirm ASIC came out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		memsize = amdgpu_asic_get_config_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
	if (memsize == 0xffffffff) {
		r = -ETIME;
		goto out;
	}

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);

	adev->no_hw_access = true;
	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
	adev->no_hw_access = false;
	if (r)
		goto out;

	r = amdgpu_do_asic_reset(&device_list, &reset_context);

out:
	if (!r) {
		if (amdgpu_device_cache_pci_state(adev->pdev))
			pci_restore_state(adev->pdev);

		DRM_INFO("PCIe error recovery succeeded\n");
	} else {
		DRM_ERROR("PCIe error recovery failed, err:%d\n", r);
		amdgpu_device_unset_mp1_state(adev);
		amdgpu_device_unlock_reset_domain(adev->reset_domain);
	}

	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

/**
 * amdgpu_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that it's OK to resume
 * normal operation.
 */
void amdgpu_pci_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: resume callback!!\n");

	/* Only continue execution for the case of pci_channel_io_frozen */
	if (adev->pci_channel_state != pci_channel_io_frozen)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		drm_sched_start(&ring->sched, true);
	}

	amdgpu_device_unset_mp1_state(adev);
	amdgpu_device_unlock_reset_domain(adev->reset_domain);
}

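/**
 * amdgpu_device_cache_pci_state - cache the PCI config space of the device
 *
 * @pdev: PCI device struct
 *
 * Saves the PCI configuration space and keeps a copy in adev->pci_state
 * so that it can be restored after a GPU reset or PCI error recovery.
 * Returns true on success, false otherwise.
 */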
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pci_save_state(pdev);
	if (!r) {
		kfree(adev->pci_state);

		adev->pci_state = pci_store_saved_state(pdev);

		if (!adev->pci_state) {
			DRM_ERROR("Failed to store PCI saved state");
			return false;
		}
	} else {
		DRM_WARN("Failed to save PCI state, err:%d\n", r);
		return false;
	}

	return true;
}

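/**
 * amdgpu_device_load_pci_state - restore the cached PCI config space
 *
 * @pdev: PCI device struct
 *
 * Loads the PCI configuration space previously cached by
 * amdgpu_device_cache_pci_state() and writes it back to the device.
 * Returns true on success, false if nothing was cached or the load failed.
 */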
bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (!adev->pci_state)
		return false;

	r = pci_load_saved_state(pdev, adev->pci_state);

	if (!r) {
		pci_restore_state(pdev);
	} else {
		DRM_WARN("Failed to load PCI state, err:%d\n", r);
		return false;
	}

	return true;
}

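/**
 * amdgpu_device_flush_hdp - flush the HDP (Host Data Path) cache
 *
 * @adev: amdgpu_device pointer
 * @ring: optional ring on which to emit the flush packet
 *
 * Flushes the HDP cache either by emitting a flush packet on @ring or,
 * when no ring is given or it cannot emit one, via the ASIC callback.
 * This is a no-op where no HDP cache sits between the CPU and VRAM:
 * on x86-64 APUs outside of passthrough mode and on devices whose
 * framebuffer is connected to the CPU.
 */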
void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	if (ring && ring->funcs->emit_hdp_flush)
		amdgpu_ring_emit_hdp_flush(ring);
	else
		amdgpu_asic_flush_hdp(adev, ring);
}

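/**
 * amdgpu_device_invalidate_hdp - invalidate the HDP (Host Data Path) cache
 *
 * @adev: amdgpu_device pointer
 * @ring: ring the invalidation is associated with, may be NULL
 *
 * Invalidates the HDP cache via the ASIC callback. Like
 * amdgpu_device_flush_hdp(), this is a no-op on x86-64 APUs outside of
 * passthrough mode and on devices whose framebuffer is connected to
 * the CPU.
 */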
void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	amdgpu_asic_invalidate_hdp(adev, ring);
}

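/**
 * amdgpu_in_reset - check whether the GPU is currently in reset
 *
 * @adev: amdgpu_device pointer
 *
 * Returns non-zero if a GPU reset is in progress in the reset domain
 * this device belongs to, zero otherwise.
 */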
int amdgpu_in_reset(struct amdgpu_device *adev)
{
	return atomic_read(&adev->reset_domain->in_gpu_reset);
}

/**
 * amdgpu_device_halt() - bring hardware to some kind of halt state
 *
 * @adev: amdgpu_device pointer
 *
 * Bring the hardware to some kind of halt state so that no one can
 * touch it any more. This helps preserve the error context when an
 * error occurs. Compared to a simple hang, the system stays stable,
 * at least for SSH access, so it should be trivial to inspect the
 * hardware state and see what's going on. Implemented as follows:
 *
 * 1. drm_dev_unplug() makes the device inaccessible to user space
 *    (IOCTLs, etc.), clears all CPU mappings to the device, and
 *    disallows remappings through page faults
 * 2. amdgpu_irq_disable_all() disables all interrupts
 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
 * 4. set adev->no_hw_access to avoid potential crashes after step 5
 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
 *    flush any in-flight DMA operations
 */
void amdgpu_device_halt(struct amdgpu_device *adev)
{
	struct pci_dev *pdev = adev->pdev;
	struct drm_device *ddev = adev_to_drm(adev);

	amdgpu_xcp_dev_unplug(adev);
	drm_dev_unplug(ddev);

	amdgpu_irq_disable_all(adev);

	amdgpu_fence_driver_hw_fini(adev);

	adev->no_hw_access = true;

	amdgpu_device_unmap_mmio(adev);

	pci_disable_device(pdev);
	pci_wait_for_pending_transaction(pdev);
}

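/**
 * amdgpu_device_pcie_port_rreg - read a PCIe port register
 *
 * @adev: amdgpu_device pointer
 * @reg: register dword offset
 *
 * Reads a register in the PCIe port block through the NBIO index/data
 * pair, serialized with the pcie_idx_lock spinlock. Returns the
 * register value.
 */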
u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
				 u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

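/**
 * amdgpu_device_pcie_port_wreg - write a PCIe port register
 *
 * @adev: amdgpu_device pointer
 * @reg: register dword offset
 * @v: value to write
 *
 * Writes a register in the PCIe port block through the NBIO index/data
 * pair, serialized with the pcie_idx_lock spinlock. The trailing read
 * back posts the write.
 */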
void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
				  u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_switch_gang - switch to a new gang
 * @adev: amdgpu_device pointer
 * @gang: the gang to switch to
 *
 * Try to switch to a new gang.
 * Returns: NULL if we switched to the new gang or a reference to the current
 * gang leader.
 */
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
					    struct dma_fence *gang)
{
	struct dma_fence *old = NULL;

	do {
		dma_fence_put(old);
		rcu_read_lock();
		old = dma_fence_get_rcu_safe(&adev->gang_submit);
		rcu_read_unlock();

		if (old == gang)
			break;

		if (!dma_fence_is_signaled(old))
			return old;

	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
			 old, gang) != old);

	dma_fence_put(old);
	return NULL;
}

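/**
 * amdgpu_device_has_display_hardware - check whether the ASIC has a display block
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true if the ASIC has usable display hardware, false otherwise.
 * Older ASICs are matched explicitly by chip type; anything newer is
 * decided by IP discovery: display is present unless there is no DCE IP
 * or the DMU has been harvested.
 */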
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
	case CHIP_TOPAZ:
		/* chips with no display hardware */
		return false;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* chips with display hardware */
		return true;
	default:
		/* IP discovery */
		if (!adev->ip_versions[DCE_HWIP][0] ||
		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
			return false;
		return true;
	}
}

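/**
 * amdgpu_device_wait_on_rreg - poll a register until it reaches a value
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number, used only in the timeout warning message
 * @reg_addr: register dword offset
 * @reg_name: register name, used only in the timeout warning message
 * @expected_value: value the masked register is expected to reach
 * @mask: bits of the register to compare against @expected_value
 *
 * Polls @reg_addr until (value & @mask) == @expected_value, restarting
 * the usec timeout whenever the register value changes. Returns 0 on
 * success or -ETIMEDOUT if the value is never reached.
 */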
uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
				    uint32_t inst, uint32_t reg_addr, char reg_name[],
				    uint32_t expected_value, uint32_t mask)
{
	uint32_t ret = 0;
	uint32_t old_ = 0;
	uint32_t tmp_ = RREG32(reg_addr);
	uint32_t loop = adev->usec_timeout;

	while ((tmp_ & (mask)) != (expected_value)) {
		if (old_ != tmp_) {
			loop = adev->usec_timeout;
			old_ = tmp_;
		} else {
			udelay(1);
		}
		tmp_ = RREG32(reg_addr);
		loop--;
		if (!loop) {
			DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
				 inst, reg_name, (uint32_t)expected_value,
				 (uint32_t)(tmp_ & (mask)));
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}
