/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000
#define AMDGPU_MAX_RETRY_LIMIT		2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)

static const struct drm_driver amdgpu_kms_driver;

const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
"POLARIS12", 115 "VEGAM", 116 "VEGA10", 117 "VEGA12", 118 "VEGA20", 119 "RAVEN", 120 "ARCTURUS", 121 "RENOIR", 122 "ALDEBARAN", 123 "NAVI10", 124 "CYAN_SKILLFISH", 125 "NAVI14", 126 "NAVI12", 127 "SIENNA_CICHLID", 128 "NAVY_FLOUNDER", 129 "VANGOGH", 130 "DIMGREY_CAVEFISH", 131 "BEIGE_GOBY", 132 "YELLOW_CARP", 133 "IP DISCOVERY", 134 "LAST", 135 }; 136 137 /** 138 * DOC: pcie_replay_count 139 * 140 * The amdgpu driver provides a sysfs API for reporting the total number 141 * of PCIe replays (NAKs) 142 * The file pcie_replay_count is used for this and returns the total 143 * number of replays as a sum of the NAKs generated and NAKs received 144 */ 145 146 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev, 147 struct device_attribute *attr, char *buf) 148 { 149 struct drm_device *ddev = dev_get_drvdata(dev); 150 struct amdgpu_device *adev = drm_to_adev(ddev); 151 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev); 152 153 return sysfs_emit(buf, "%llu\n", cnt); 154 } 155 156 static DEVICE_ATTR(pcie_replay_count, S_IRUGO, 157 amdgpu_device_get_pcie_replay_count, NULL); 158 159 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev); 160 161 /** 162 * DOC: product_name 163 * 164 * The amdgpu driver provides a sysfs API for reporting the product name 165 * for the device 166 * The file serial_number is used for this and returns the product name 167 * as returned from the FRU. 168 * NOTE: This is only available for certain server cards 169 */ 170 171 static ssize_t amdgpu_device_get_product_name(struct device *dev, 172 struct device_attribute *attr, char *buf) 173 { 174 struct drm_device *ddev = dev_get_drvdata(dev); 175 struct amdgpu_device *adev = drm_to_adev(ddev); 176 177 return sysfs_emit(buf, "%s\n", adev->product_name); 178 } 179 180 static DEVICE_ATTR(product_name, S_IRUGO, 181 amdgpu_device_get_product_name, NULL); 182 183 /** 184 * DOC: product_number 185 * 186 * The amdgpu driver provides a sysfs API for reporting the part number 187 * for the device 188 * The file serial_number is used for this and returns the part number 189 * as returned from the FRU. 190 * NOTE: This is only available for certain server cards 191 */ 192 193 static ssize_t amdgpu_device_get_product_number(struct device *dev, 194 struct device_attribute *attr, char *buf) 195 { 196 struct drm_device *ddev = dev_get_drvdata(dev); 197 struct amdgpu_device *adev = drm_to_adev(ddev); 198 199 return sysfs_emit(buf, "%s\n", adev->product_number); 200 } 201 202 static DEVICE_ATTR(product_number, S_IRUGO, 203 amdgpu_device_get_product_number, NULL); 204 205 /** 206 * DOC: serial_number 207 * 208 * The amdgpu driver provides a sysfs API for reporting the serial number 209 * for the device 210 * The file serial_number is used for this and returns the serial number 211 * as returned from the FRU. 212 * NOTE: This is only available for certain server cards 213 */ 214 215 static ssize_t amdgpu_device_get_serial_number(struct device *dev, 216 struct device_attribute *attr, char *buf) 217 { 218 struct drm_device *ddev = dev_get_drvdata(dev); 219 struct amdgpu_device *adev = drm_to_adev(ddev); 220 221 return sysfs_emit(buf, "%s\n", adev->serial); 222 } 223 224 static DEVICE_ATTR(serial_number, S_IRUGO, 225 amdgpu_device_get_serial_number, NULL); 226 227 /** 228 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control 229 * 230 * @dev: drm_device pointer 231 * 232 * Returns true if the device is a dGPU with ATPX power control, 233 * otherwise return false. 
/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise return false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	return amdgpu_asic_supports_baco(adev);
}

/**
 * amdgpu_device_supports_smart_shift - Is the device dGPU with
 * smart shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
	return (amdgpu_device_supports_boco(dev) &&
		amdgpu_acpi_is_power_shift_control_supported());
}

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size, the size of @buf must be at least @size
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	uint64_t last;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *data++);
		else
			*data++ = RREG32_NO_KIQ(mmMM_DATA);
	}

	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	drm_dev_exit(idx);
}

/**
 * amdgpu_device_aper_access - access vram by vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size, the size of @buf must be at least @size
 * @write: true - write to vram, otherwise - read from vram
 *
 * The return value means how many bytes have been transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			mb();
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			mb();
			memcpy_fromio(buf, addr, count);
		}

	}

	return count;
#else
	return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size, the size of @buf must be at least @size
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
{
	size_t count;

	/* try to use the vram aperture to access vram first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	size -= count;
	if (size) {
		/* use MM_INDEX/MM_DATA to access the rest of vram */
		pos += count;
		buf += count;
		amdgpu_device_mm_access(adev, pos, buf, size, write);
	}
}
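/*
 * Illustrative sketch: reading the first 16 bytes of VRAM into a stack
 * buffer with the helper above.  This is not code from the driver, just
 * an example of how the helper is meant to be called; offset and size
 * must be dword aligned for the MM_INDEX/MM_DATA fallback path.
 *
 *	uint32_t data[4];
 *
 *	amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
 *	dev_info(adev->dev, "first VRAM dword: 0x%08x\n", data[0]);
 */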
/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->no_hw_access)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore, if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (in_task()) {
		if (down_read_trylock(&adev->reset_domain->sem))
			up_read(&adev->reset_domain->sem);
		else
			lockdep_assert_held(&adev->reset_domain->sem);
	}
#endif
	return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}

/*
 * MMIO register read with bytes helper functions
 * @offset: bytes offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}

/*
 * MMIO register write with bytes helper functions
 * @offset: bytes offset from MMIO start
 * @value: the value to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}

/**
 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 *
 * this function is invoked only for the debugfs register access
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
	} else if ((reg * 4) >= adev->rmmio_size) {
		adev->pcie_wreg(adev, reg * 4, v);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}
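/*
 * Illustrative sketch: how the MMIO helpers above are typically used by
 * IP code (usually indirectly, through the RREG32()/WREG32() style macros
 * that wrap them).  The register offset and bit below are placeholders,
 * not real hardware definitions.
 *
 *	uint32_t val;
 *
 *	val = amdgpu_device_rreg(adev, some_reg_offset, 0);
 *	val |= SOME_ENABLE_BIT;		// hypothetical field
 *	amdgpu_device_wreg(adev, some_reg_offset, val, 0);
 */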
/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 pcie_index, u32 pcie_data,
				u32 reg_addr)
{
	unsigned long flags;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 pcie_index, u32 pcie_data,
				  u32 reg_addr)
{
	unsigned long flags;
	u64 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_wreg - write an indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 pcie_index, u32 pcie_data,
				 u32 reg_addr, u32 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
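/*
 * Illustrative sketch of the index/data access pattern implemented above:
 * the caller passes the MMIO offsets of an index/data register pair plus
 * the address of the register reached through them.  The offsets used
 * here are placeholders.
 *
 *	u32 val;
 *
 *	// read: program the index register, then read the data register
 *	val = amdgpu_device_indirect_rreg(adev, pcie_index_reg,
 *					  pcie_data_reg, indirect_reg_addr);
 *
 *	// write: program the index register, then write the data register
 *	amdgpu_device_indirect_wreg(adev, pcie_index_reg,
 *				    pcie_data_reg, indirect_reg_addr, val);
 */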
/**
 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 pcie_index, u32 pcie_data,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	amdgpu_asic_pre_asic_init(adev);

	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
		return amdgpu_atomfirmware_asic_init(adev, true);
	else
		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
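/*
 * Illustrative sketch: the register array consumed above is a flat list
 * of {offset, and_mask, or_mask} triplets.  The offsets and mask values
 * below are placeholders, not real golden register settings.
 *
 *	static const u32 example_golden_regs[] = {
 *		// offset         and_mask    or_mask
 *		mmSOME_REGISTER,  0xffffffff, 0x00000001,	// full overwrite
 *		mmOTHER_REGISTER, 0x0000ff00, 0x00001200,	// read-modify-write
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_regs,
 *						ARRAY_SIZE(example_golden_regs));
 */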
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}

/*
 * GPU doorbell aperture helpers function.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{

	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	amdgpu_asic_init_doorbell_index(adev);

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	if (adev->enable_mes) {
		adev->doorbell.num_doorbells =
			adev->doorbell.size / sizeof(u32);
	} else {
		adev->doorbell.num_doorbells =
			min_t(u32, adev->doorbell.size / sizeof(u32),
			      adev->doorbell_index.max_assignment+1);
		if (adev->doorbell.num_doorbells == 0)
			return -EINVAL;

		/* For Vega, reserve and map two pages on doorbell BAR since SDMA
		 * paging queue doorbell use the second page. The
		 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
		 * doorbells are in the first page. So with paging queue enabled,
		 * the max num_doorbells should be increased by one page (0x400 dwords).
		 */
		if (adev->asic_type >= CHIP_VEGA10)
			adev->doorbell.num_doorbells += 0x400;
	}

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}



/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
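/*
 * Illustrative sketch: a ring or IP block allocates a writeback slot,
 * derives CPU and GPU addresses for it, and releases it again on
 * teardown.  Error handling is trimmed and the variable names are
 * placeholders.
 *
 *	u32 wb;
 *	u64 wb_gpu_addr;
 *	volatile u32 *wb_cpu_addr;
 *
 *	if (amdgpu_device_wb_get(adev, &wb))
 *		return -EINVAL;
 *
 *	wb_gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *	wb_cpu_addr = &adev->wb.wb[wb];
 *
 *	// ... hardware writes status to wb_gpu_addr, driver polls *wb_cpu_addr ...
 *
 *	amdgpu_device_wb_free(adev, wb);
 */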
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
			rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helpers function.
 */
/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if need or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still need the driver to do vPost, otherwise the gpu hangs.
		 * smc fw versions above 22.15 don't have this flaw, so we force
		 * vpost to be executed for smc versions below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset whole hive on init */
	if (adev->gmc.xgmi.pending_reset)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *
 * @adev: amdgpu_device pointer
 *
 * Confirm whether the module parameter and pcie bridge agree that ASPM should
 * be set for this device.
 *
 * Returns true if it should be used or false if not.
 */
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
{
	switch (amdgpu_aspm) {
	case -1:
		break;
	case 0:
		return false;
	case 1:
		return true;
	default:
		return false;
	}
	return pcie_aspm_enabled(adev->pdev);
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
		bool state)
{
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
	    (amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		   (amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
	if (!(adev->flags & AMD_IS_APU) ||
	    adev->asic_type < CHIP_RAVEN)
		return 0;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		break;
	case CHIP_RENOIR:
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
		break;
	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case CHIP_YELLOW_CARP:
		break;
	case CHIP_CYAN_SKILLFISH:
		if ((adev->pdev->device == 0x13FE) ||
		    (adev->pdev->device == 0x143F))
			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
		break;
	default:
		break;
	}

	return 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	if (amdgpu_sched_hw_submission < 2) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = 2;
	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
	}

	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
		amdgpu_reset_method = -1;
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	return 0;
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		pci_set_power_state(pdev, PCI_D0);
		amdgpu_device_load_pci_state(pdev);
		r = pci_enable_device(pdev);
		if (r)
			DRM_WARN("pci_enable_device failed (%d)\n", r);
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true);
		amdgpu_device_cache_pci_state(pdev);
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}
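/*
 * Illustrative sketch: gating the clocks of one IP type across all of its
 * instances with the helper above.  Power management code calls this with
 * the IP type it wants to gate; the block type chosen here is just an
 * example.
 *
 *	int r;
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *						   AMD_CG_STATE_GATE);
 *	if (r)
 *		dev_warn(adev->dev, "failed to gate GFX clocks (%d)\n", r);
 */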
/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;

}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;

}

/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	switch (ip_block_version->type) {
	case AMD_IP_BLOCK_TYPE_VCN:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
			return 0;
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
			return 0;
		break;
	default:
		break;
	}

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		 ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}
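/*
 * Illustrative sketch: IP code can look up a block, or gate a feature on a
 * minimum IP version, using the helpers above.  The version numbers and
 * the enable_some_feature() helper are placeholders.
 *
 *	struct amdgpu_ip_block *gfx_block;
 *
 *	gfx_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 *	if (gfx_block)
 *		DRM_INFO("GFX IP v%d.%d\n", gfx_block->version->major,
 *			 gfx_block->version->minor);
 *
 *	// returns 0 when the GFX block is at least v8.1
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 1))
 *		enable_some_feature(adev);	// hypothetical helper
 */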
/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		const char *pci_address_name = pci_name(adev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
		adev->mode_info.num_crtc = 1;
		adev->enable_virtual_display = true;
		DRM_INFO("virtual_display:%d, num_crtc:%d\n",
			 adev->enable_virtual_display, adev->mode_info.num_crtc);
	}
}

/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[40];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	if (adev->mman.discovery_bin) {
		/*
		 * FIXME: The bounding box is still needed by Navi12, so
		 * temporarily read it from gpu_info firmware. Should be dropped
		 * when DAL no longer needs it.
1956 */ 1957 if (adev->asic_type != CHIP_NAVI12) 1958 return 0; 1959 } 1960 1961 switch (adev->asic_type) { 1962 default: 1963 return 0; 1964 case CHIP_VEGA10: 1965 chip_name = "vega10"; 1966 break; 1967 case CHIP_VEGA12: 1968 chip_name = "vega12"; 1969 break; 1970 case CHIP_RAVEN: 1971 if (adev->apu_flags & AMD_APU_IS_RAVEN2) 1972 chip_name = "raven2"; 1973 else if (adev->apu_flags & AMD_APU_IS_PICASSO) 1974 chip_name = "picasso"; 1975 else 1976 chip_name = "raven"; 1977 break; 1978 case CHIP_ARCTURUS: 1979 chip_name = "arcturus"; 1980 break; 1981 case CHIP_NAVI12: 1982 chip_name = "navi12"; 1983 break; 1984 } 1985 1986 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); 1987 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev); 1988 if (err) { 1989 dev_err(adev->dev, 1990 "Failed to load gpu_info firmware \"%s\"\n", 1991 fw_name); 1992 goto out; 1993 } 1994 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw); 1995 if (err) { 1996 dev_err(adev->dev, 1997 "Failed to validate gpu_info firmware \"%s\"\n", 1998 fw_name); 1999 goto out; 2000 } 2001 2002 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data; 2003 amdgpu_ucode_print_gpu_info_hdr(&hdr->header); 2004 2005 switch (hdr->version_major) { 2006 case 1: 2007 { 2008 const struct gpu_info_firmware_v1_0 *gpu_info_fw = 2009 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data + 2010 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2011 2012 /* 2013 * Should be droped when DAL no longer needs it. 2014 */ 2015 if (adev->asic_type == CHIP_NAVI12) 2016 goto parse_soc_bounding_box; 2017 2018 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se); 2019 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh); 2020 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se); 2021 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se); 2022 adev->gfx.config.max_texture_channel_caches = 2023 le32_to_cpu(gpu_info_fw->gc_num_tccs); 2024 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs); 2025 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds); 2026 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth); 2027 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth); 2028 adev->gfx.config.double_offchip_lds_buf = 2029 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer); 2030 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size); 2031 adev->gfx.cu_info.max_waves_per_simd = 2032 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd); 2033 adev->gfx.cu_info.max_scratch_slots_per_cu = 2034 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu); 2035 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size); 2036 if (hdr->version_minor >= 1) { 2037 const struct gpu_info_firmware_v1_1 *gpu_info_fw = 2038 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data + 2039 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2040 adev->gfx.config.num_sc_per_sh = 2041 le32_to_cpu(gpu_info_fw->num_sc_per_sh); 2042 adev->gfx.config.num_packer_per_sc = 2043 le32_to_cpu(gpu_info_fw->num_packer_per_sc); 2044 } 2045 2046 parse_soc_bounding_box: 2047 /* 2048 * soc bounding box info is not integrated in disocovery table, 2049 * we always need to parse it from gpu info firmware if needed. 
2050 */ 2051 if (hdr->version_minor == 2) { 2052 const struct gpu_info_firmware_v1_2 *gpu_info_fw = 2053 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data + 2054 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2055 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box; 2056 } 2057 break; 2058 } 2059 default: 2060 dev_err(adev->dev, 2061 "Unsupported gpu_info table %d\n", hdr->header.ucode_version); 2062 err = -EINVAL; 2063 goto out; 2064 } 2065 out: 2066 return err; 2067 } 2068 2069 /** 2070 * amdgpu_device_ip_early_init - run early init for hardware IPs 2071 * 2072 * @adev: amdgpu_device pointer 2073 * 2074 * Early initialization pass for hardware IPs. The hardware IPs that make 2075 * up each asic are discovered each IP's early_init callback is run. This 2076 * is the first stage in initializing the asic. 2077 * Returns 0 on success, negative error code on failure. 2078 */ 2079 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) 2080 { 2081 struct drm_device *dev = adev_to_drm(adev); 2082 struct pci_dev *parent; 2083 int i, r; 2084 2085 amdgpu_device_enable_virtual_display(adev); 2086 2087 if (amdgpu_sriov_vf(adev)) { 2088 r = amdgpu_virt_request_full_gpu(adev, true); 2089 if (r) 2090 return r; 2091 } 2092 2093 switch (adev->asic_type) { 2094 #ifdef CONFIG_DRM_AMDGPU_SI 2095 case CHIP_VERDE: 2096 case CHIP_TAHITI: 2097 case CHIP_PITCAIRN: 2098 case CHIP_OLAND: 2099 case CHIP_HAINAN: 2100 adev->family = AMDGPU_FAMILY_SI; 2101 r = si_set_ip_blocks(adev); 2102 if (r) 2103 return r; 2104 break; 2105 #endif 2106 #ifdef CONFIG_DRM_AMDGPU_CIK 2107 case CHIP_BONAIRE: 2108 case CHIP_HAWAII: 2109 case CHIP_KAVERI: 2110 case CHIP_KABINI: 2111 case CHIP_MULLINS: 2112 if (adev->flags & AMD_IS_APU) 2113 adev->family = AMDGPU_FAMILY_KV; 2114 else 2115 adev->family = AMDGPU_FAMILY_CI; 2116 2117 r = cik_set_ip_blocks(adev); 2118 if (r) 2119 return r; 2120 break; 2121 #endif 2122 case CHIP_TOPAZ: 2123 case CHIP_TONGA: 2124 case CHIP_FIJI: 2125 case CHIP_POLARIS10: 2126 case CHIP_POLARIS11: 2127 case CHIP_POLARIS12: 2128 case CHIP_VEGAM: 2129 case CHIP_CARRIZO: 2130 case CHIP_STONEY: 2131 if (adev->flags & AMD_IS_APU) 2132 adev->family = AMDGPU_FAMILY_CZ; 2133 else 2134 adev->family = AMDGPU_FAMILY_VI; 2135 2136 r = vi_set_ip_blocks(adev); 2137 if (r) 2138 return r; 2139 break; 2140 default: 2141 r = amdgpu_discovery_set_ip_blocks(adev); 2142 if (r) 2143 return r; 2144 break; 2145 } 2146 2147 if (amdgpu_has_atpx() && 2148 (amdgpu_is_atpx_hybrid() || 2149 amdgpu_has_atpx_dgpu_power_cntl()) && 2150 ((adev->flags & AMD_IS_APU) == 0) && 2151 !pci_is_thunderbolt_attached(to_pci_dev(dev->dev))) 2152 adev->flags |= AMD_IS_PX; 2153 2154 if (!(adev->flags & AMD_IS_APU)) { 2155 parent = pci_upstream_bridge(adev->pdev); 2156 adev->has_pr3 = parent ? 
pci_pr3_present(parent) : false; 2157 } 2158 2159 amdgpu_amdkfd_device_probe(adev); 2160 2161 adev->pm.pp_feature = amdgpu_pp_feature_mask; 2162 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS) 2163 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 2164 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) 2165 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK; 2166 2167 for (i = 0; i < adev->num_ip_blocks; i++) { 2168 if ((amdgpu_ip_block_mask & (1 << i)) == 0) { 2169 DRM_ERROR("disabled ip block: %d <%s>\n", 2170 i, adev->ip_blocks[i].version->funcs->name); 2171 adev->ip_blocks[i].status.valid = false; 2172 } else { 2173 if (adev->ip_blocks[i].version->funcs->early_init) { 2174 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev); 2175 if (r == -ENOENT) { 2176 adev->ip_blocks[i].status.valid = false; 2177 } else if (r) { 2178 DRM_ERROR("early_init of IP block <%s> failed %d\n", 2179 adev->ip_blocks[i].version->funcs->name, r); 2180 return r; 2181 } else { 2182 adev->ip_blocks[i].status.valid = true; 2183 } 2184 } else { 2185 adev->ip_blocks[i].status.valid = true; 2186 } 2187 } 2188 /* get the vbios after the asic_funcs are set up */ 2189 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { 2190 r = amdgpu_device_parse_gpu_info_fw(adev); 2191 if (r) 2192 return r; 2193 2194 /* Read BIOS */ 2195 if (!amdgpu_get_bios(adev)) 2196 return -EINVAL; 2197 2198 r = amdgpu_atombios_init(adev); 2199 if (r) { 2200 dev_err(adev->dev, "amdgpu_atombios_init failed\n"); 2201 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); 2202 return r; 2203 } 2204 2205 /*get pf2vf msg info at it's earliest time*/ 2206 if (amdgpu_sriov_vf(adev)) 2207 amdgpu_virt_init_data_exchange(adev); 2208 2209 } 2210 } 2211 2212 adev->cg_flags &= amdgpu_cg_mask; 2213 adev->pg_flags &= amdgpu_pg_mask; 2214 2215 return 0; 2216 } 2217 2218 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev) 2219 { 2220 int i, r; 2221 2222 for (i = 0; i < adev->num_ip_blocks; i++) { 2223 if (!adev->ip_blocks[i].status.sw) 2224 continue; 2225 if (adev->ip_blocks[i].status.hw) 2226 continue; 2227 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 2228 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) || 2229 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { 2230 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 2231 if (r) { 2232 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 2233 adev->ip_blocks[i].version->funcs->name, r); 2234 return r; 2235 } 2236 adev->ip_blocks[i].status.hw = true; 2237 } 2238 } 2239 2240 return 0; 2241 } 2242 2243 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev) 2244 { 2245 int i, r; 2246 2247 for (i = 0; i < adev->num_ip_blocks; i++) { 2248 if (!adev->ip_blocks[i].status.sw) 2249 continue; 2250 if (adev->ip_blocks[i].status.hw) 2251 continue; 2252 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 2253 if (r) { 2254 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 2255 adev->ip_blocks[i].version->funcs->name, r); 2256 return r; 2257 } 2258 adev->ip_blocks[i].status.hw = true; 2259 } 2260 2261 return 0; 2262 } 2263 2264 static int amdgpu_device_fw_loading(struct amdgpu_device *adev) 2265 { 2266 int r = 0; 2267 int i; 2268 uint32_t smu_version; 2269 2270 if (adev->asic_type >= CHIP_VEGA10) { 2271 for (i = 0; i < adev->num_ip_blocks; i++) { 2272 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP) 2273 continue; 2274 2275 if 
(!adev->ip_blocks[i].status.sw) 2276 continue; 2277 2278 /* no need to do the fw loading again if already done*/ 2279 if (adev->ip_blocks[i].status.hw == true) 2280 break; 2281 2282 if (amdgpu_in_reset(adev) || adev->in_suspend) { 2283 r = adev->ip_blocks[i].version->funcs->resume(adev); 2284 if (r) { 2285 DRM_ERROR("resume of IP block <%s> failed %d\n", 2286 adev->ip_blocks[i].version->funcs->name, r); 2287 return r; 2288 } 2289 } else { 2290 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 2291 if (r) { 2292 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 2293 adev->ip_blocks[i].version->funcs->name, r); 2294 return r; 2295 } 2296 } 2297 2298 adev->ip_blocks[i].status.hw = true; 2299 break; 2300 } 2301 } 2302 2303 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA) 2304 r = amdgpu_pm_load_smu_firmware(adev, &smu_version); 2305 2306 return r; 2307 } 2308 2309 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) 2310 { 2311 long timeout; 2312 int r, i; 2313 2314 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 2315 struct amdgpu_ring *ring = adev->rings[i]; 2316 2317 /* No need to setup the GPU scheduler for rings that don't need it */ 2318 if (!ring || ring->no_scheduler) 2319 continue; 2320 2321 switch (ring->funcs->type) { 2322 case AMDGPU_RING_TYPE_GFX: 2323 timeout = adev->gfx_timeout; 2324 break; 2325 case AMDGPU_RING_TYPE_COMPUTE: 2326 timeout = adev->compute_timeout; 2327 break; 2328 case AMDGPU_RING_TYPE_SDMA: 2329 timeout = adev->sdma_timeout; 2330 break; 2331 default: 2332 timeout = adev->video_timeout; 2333 break; 2334 } 2335 2336 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, 2337 ring->num_hw_submission, amdgpu_job_hang_limit, 2338 timeout, adev->reset_domain->wq, 2339 ring->sched_score, ring->name, 2340 adev->dev); 2341 if (r) { 2342 DRM_ERROR("Failed to create scheduler on ring %s.\n", 2343 ring->name); 2344 return r; 2345 } 2346 } 2347 2348 return 0; 2349 } 2350 2351 2352 /** 2353 * amdgpu_device_ip_init - run init for hardware IPs 2354 * 2355 * @adev: amdgpu_device pointer 2356 * 2357 * Main initialization pass for hardware IPs. The list of all the hardware 2358 * IPs that make up the asic is walked and the sw_init and hw_init callbacks 2359 * are run. sw_init initializes the software state associated with each IP 2360 * and hw_init initializes the hardware associated with each IP. 2361 * Returns 0 on success, negative error code on failure. 
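 * Roughly, the ordering below is: sw_init for every valid block (with the COMMON and GMC hw_init pulled forward so GPU memory is usable), then hw_init phase 1 (COMMON, IH and, for SR-IOV, PSP), firmware loading, and hw_init phase 2 for the remaining blocks.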
2362 */ 2363 static int amdgpu_device_ip_init(struct amdgpu_device *adev) 2364 { 2365 int i, r; 2366 2367 r = amdgpu_ras_init(adev); 2368 if (r) 2369 return r; 2370 2371 for (i = 0; i < adev->num_ip_blocks; i++) { 2372 if (!adev->ip_blocks[i].status.valid) 2373 continue; 2374 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev); 2375 if (r) { 2376 DRM_ERROR("sw_init of IP block <%s> failed %d\n", 2377 adev->ip_blocks[i].version->funcs->name, r); 2378 goto init_failed; 2379 } 2380 adev->ip_blocks[i].status.sw = true; 2381 2382 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { 2383 /* need to do common hw init early so everything is set up for gmc */ 2384 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); 2385 if (r) { 2386 DRM_ERROR("hw_init %d failed %d\n", i, r); 2387 goto init_failed; 2388 } 2389 adev->ip_blocks[i].status.hw = true; 2390 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 2391 /* need to do gmc hw init early so we can allocate gpu mem */ 2392 /* Try to reserve bad pages early */ 2393 if (amdgpu_sriov_vf(adev)) 2394 amdgpu_virt_exchange_data(adev); 2395 2396 r = amdgpu_device_vram_scratch_init(adev); 2397 if (r) { 2398 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); 2399 goto init_failed; 2400 } 2401 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); 2402 if (r) { 2403 DRM_ERROR("hw_init %d failed %d\n", i, r); 2404 goto init_failed; 2405 } 2406 r = amdgpu_device_wb_init(adev); 2407 if (r) { 2408 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r); 2409 goto init_failed; 2410 } 2411 adev->ip_blocks[i].status.hw = true; 2412 2413 /* right after GMC hw init, we create CSA */ 2414 if (amdgpu_mcbp) { 2415 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj, 2416 AMDGPU_GEM_DOMAIN_VRAM, 2417 AMDGPU_CSA_SIZE); 2418 if (r) { 2419 DRM_ERROR("allocate CSA failed %d\n", r); 2420 goto init_failed; 2421 } 2422 } 2423 } 2424 } 2425 2426 if (amdgpu_sriov_vf(adev)) 2427 amdgpu_virt_init_data_exchange(adev); 2428 2429 r = amdgpu_ib_pool_init(adev); 2430 if (r) { 2431 dev_err(adev->dev, "IB initialization failed (%d).\n", r); 2432 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r); 2433 goto init_failed; 2434 } 2435 2436 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/ 2437 if (r) 2438 goto init_failed; 2439 2440 r = amdgpu_device_ip_hw_init_phase1(adev); 2441 if (r) 2442 goto init_failed; 2443 2444 r = amdgpu_device_fw_loading(adev); 2445 if (r) 2446 goto init_failed; 2447 2448 r = amdgpu_device_ip_hw_init_phase2(adev); 2449 if (r) 2450 goto init_failed; 2451 2452 /* 2453 * retired pages will be loaded from eeprom and reserved here, 2454 * it should be called after amdgpu_device_ip_hw_init_phase2 since 2455 * for some ASICs the RAS EEPROM code relies on SMU fully functioning 2456 * for I2C communication which only true at this point. 2457 * 2458 * amdgpu_ras_recovery_init may fail, but the upper only cares the 2459 * failure from bad gpu situation and stop amdgpu init process 2460 * accordingly. For other failed cases, it will still release all 2461 * the resource and print error message, rather than returning one 2462 * negative value to upper level. 
2463 * 2464 * Note: theoretically, this should be called before all vram allocations 2465 * to protect retired pages from being abused 2466 */ 2467 r = amdgpu_ras_recovery_init(adev); 2468 if (r) 2469 goto init_failed; 2470 2471 /** 2472 * In case of XGMI, grab an extra reference on the reset domain for this device 2473 */ 2474 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2475 if (amdgpu_xgmi_add_device(adev) == 0) { 2476 if (!amdgpu_sriov_vf(adev)) { 2477 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 2478 2479 if (WARN_ON(!hive)) { 2480 r = -ENOENT; 2481 goto init_failed; 2482 } 2483 2484 if (!hive->reset_domain || 2485 !amdgpu_reset_get_reset_domain(hive->reset_domain)) { 2486 r = -ENOENT; 2487 amdgpu_put_xgmi_hive(hive); 2488 goto init_failed; 2489 } 2490 2491 /* Drop the early temporary reset domain we created for this device */ 2492 amdgpu_reset_put_reset_domain(adev->reset_domain); 2493 adev->reset_domain = hive->reset_domain; 2494 amdgpu_put_xgmi_hive(hive); 2495 } 2496 } 2497 } 2498 2499 r = amdgpu_device_init_schedulers(adev); 2500 if (r) 2501 goto init_failed; 2502 2503 /* Don't init kfd if the whole hive needs to be reset during init */ 2504 if (!adev->gmc.xgmi.pending_reset) 2505 amdgpu_amdkfd_device_init(adev); 2506 2507 amdgpu_fru_get_product_info(adev); 2508 2509 init_failed: 2510 if (amdgpu_sriov_vf(adev)) 2511 amdgpu_virt_release_full_gpu(adev, true); 2512 2513 return r; 2514 } 2515 2516 /** 2517 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer 2518 * 2519 * @adev: amdgpu_device pointer 2520 * 2521 * Writes a reset magic value to the gart pointer in VRAM. The driver calls 2522 * this function before a GPU reset. If the value is retained after a 2523 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents. 2524 */ 2525 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev) 2526 { 2527 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); 2528 } 2529 2530 /** 2531 * amdgpu_device_check_vram_lost - check if vram is valid 2532 * 2533 * @adev: amdgpu_device pointer 2534 * 2535 * Checks the reset magic value written to the gart pointer in VRAM. 2536 * The driver calls this after a GPU reset to see if the contents of 2537 * VRAM are lost or not. 2538 * returns true if vram is lost, false if not. 2539 */ 2540 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) 2541 { 2542 if (memcmp(adev->gart.ptr, adev->reset_magic, 2543 AMDGPU_RESET_MAGIC_NUM)) 2544 return true; 2545 2546 if (!amdgpu_in_reset(adev)) 2547 return false; 2548 2549 /* 2550 * For all ASICs with baco/mode1 reset, the VRAM is 2551 * always assumed to be lost. 2552 */ 2553 switch (amdgpu_asic_reset_method(adev)) { 2554 case AMD_RESET_METHOD_BACO: 2555 case AMD_RESET_METHOD_MODE1: 2556 return true; 2557 default: 2558 return false; 2559 } 2560 } 2561 2562 /** 2563 * amdgpu_device_set_cg_state - set clockgating for amdgpu device 2564 * 2565 * @adev: amdgpu_device pointer 2566 * @state: clockgating state (gate or ungate) 2567 * 2568 * The list of all the hardware IPs that make up the asic is walked and the 2569 * set_clockgating_state callbacks are run. 2570 * The late initialization pass enables clockgating for hardware IPs. 2571 * The fini or suspend pass disables clockgating for hardware IPs. 2572 * Returns 0 on success, negative error code on failure.
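 * Note that the loop below walks the IP list front to back when gating and back to front when ungating, so clockgating is torn down in the reverse order in which it was enabled.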
2573 */ 2574 2575 int amdgpu_device_set_cg_state(struct amdgpu_device *adev, 2576 enum amd_clockgating_state state) 2577 { 2578 int i, j, r; 2579 2580 if (amdgpu_emu_mode == 1) 2581 return 0; 2582 2583 for (j = 0; j < adev->num_ip_blocks; j++) { 2584 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 2585 if (!adev->ip_blocks[i].status.late_initialized) 2586 continue; 2587 /* skip CG for GFX on S0ix */ 2588 if (adev->in_s0ix && 2589 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) 2590 continue; 2591 /* skip CG for VCE/UVD, it's handled specially */ 2592 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 2593 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && 2594 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && 2595 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && 2596 adev->ip_blocks[i].version->funcs->set_clockgating_state) { 2597 /* enable clockgating to save power */ 2598 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 2599 state); 2600 if (r) { 2601 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", 2602 adev->ip_blocks[i].version->funcs->name, r); 2603 return r; 2604 } 2605 } 2606 } 2607 2608 return 0; 2609 } 2610 2611 int amdgpu_device_set_pg_state(struct amdgpu_device *adev, 2612 enum amd_powergating_state state) 2613 { 2614 int i, j, r; 2615 2616 if (amdgpu_emu_mode == 1) 2617 return 0; 2618 2619 for (j = 0; j < adev->num_ip_blocks; j++) { 2620 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 2621 if (!adev->ip_blocks[i].status.late_initialized) 2622 continue; 2623 /* skip PG for GFX on S0ix */ 2624 if (adev->in_s0ix && 2625 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) 2626 continue; 2627 /* skip CG for VCE/UVD, it's handled specially */ 2628 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 2629 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && 2630 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && 2631 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && 2632 adev->ip_blocks[i].version->funcs->set_powergating_state) { 2633 /* enable powergating to save power */ 2634 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev, 2635 state); 2636 if (r) { 2637 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n", 2638 adev->ip_blocks[i].version->funcs->name, r); 2639 return r; 2640 } 2641 } 2642 } 2643 return 0; 2644 } 2645 2646 static int amdgpu_device_enable_mgpu_fan_boost(void) 2647 { 2648 struct amdgpu_gpu_instance *gpu_ins; 2649 struct amdgpu_device *adev; 2650 int i, ret = 0; 2651 2652 mutex_lock(&mgpu_info.mutex); 2653 2654 /* 2655 * MGPU fan boost feature should be enabled 2656 * only when there are two or more dGPUs in 2657 * the system 2658 */ 2659 if (mgpu_info.num_dgpu < 2) 2660 goto out; 2661 2662 for (i = 0; i < mgpu_info.num_dgpu; i++) { 2663 gpu_ins = &(mgpu_info.gpu_ins[i]); 2664 adev = gpu_ins->adev; 2665 if (!(adev->flags & AMD_IS_APU) && 2666 !gpu_ins->mgpu_fan_enabled) { 2667 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev); 2668 if (ret) 2669 break; 2670 2671 gpu_ins->mgpu_fan_enabled = 1; 2672 } 2673 } 2674 2675 out: 2676 mutex_unlock(&mgpu_info.mutex); 2677 2678 return ret; 2679 } 2680 2681 /** 2682 * amdgpu_device_ip_late_init - run late init for hardware IPs 2683 * 2684 * @adev: amdgpu_device pointer 2685 * 2686 * Late initialization pass for hardware IPs. 
The list of all the hardware 2687 * IPs that make up the asic is walked and the late_init callbacks are run. 2688 * late_init covers any special initialization that an IP requires 2689 * after all of them have been initialized or something that needs to happen 2690 * late in the init process. 2691 * Returns 0 on success, negative error code on failure. 2692 */ 2693 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) 2694 { 2695 struct amdgpu_gpu_instance *gpu_instance; 2696 int i = 0, r; 2697 2698 for (i = 0; i < adev->num_ip_blocks; i++) { 2699 if (!adev->ip_blocks[i].status.hw) 2700 continue; 2701 if (adev->ip_blocks[i].version->funcs->late_init) { 2702 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev); 2703 if (r) { 2704 DRM_ERROR("late_init of IP block <%s> failed %d\n", 2705 adev->ip_blocks[i].version->funcs->name, r); 2706 return r; 2707 } 2708 } 2709 adev->ip_blocks[i].status.late_initialized = true; 2710 } 2711 2712 r = amdgpu_ras_late_init(adev); 2713 if (r) { 2714 DRM_ERROR("amdgpu_ras_late_init failed %d", r); 2715 return r; 2716 } 2717 2718 amdgpu_ras_set_error_query_ready(adev, true); 2719 2720 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE); 2721 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE); 2722 2723 amdgpu_device_fill_reset_magic(adev); 2724 2725 r = amdgpu_device_enable_mgpu_fan_boost(); 2726 if (r) 2727 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r); 2728 2729 /* For passthrough configuration on arcturus and aldebaran, enable special handling of SBR */ 2730 if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1)|| 2731 adev->asic_type == CHIP_ALDEBARAN )) 2732 amdgpu_dpm_handle_passthrough_sbr(adev, true); 2733 2734 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2735 mutex_lock(&mgpu_info.mutex); 2736 2737 /* 2738 * Reset the device p-state to low, as it was booted with high. 2739 * 2740 * This should be performed only after all devices from the same 2741 * hive get initialized. 2742 * 2743 * However, the number of devices in the hive is not known in advance, 2744 * as they are counted one by one during device initialization. 2745 * 2746 * So we wait until all XGMI interlinked devices are initialized. 2747 * This may bring some delays as those devices may come from 2748 * different hives. But that should be OK.
2749 */ 2750 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) { 2751 for (i = 0; i < mgpu_info.num_gpu; i++) { 2752 gpu_instance = &(mgpu_info.gpu_ins[i]); 2753 if (gpu_instance->adev->flags & AMD_IS_APU) 2754 continue; 2755 2756 r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 2757 AMDGPU_XGMI_PSTATE_MIN); 2758 if (r) { 2759 DRM_ERROR("pstate setting failed (%d).\n", r); 2760 break; 2761 } 2762 } 2763 } 2764 2765 mutex_unlock(&mgpu_info.mutex); 2766 } 2767 2768 return 0; 2769 } 2770 2771 /** 2772 * amdgpu_device_smu_fini_early - smu hw_fini wrapper 2773 * 2774 * @adev: amdgpu_device pointer 2775 * 2776 * For ASICs that need to disable the SMC first 2777 */ 2778 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev) 2779 { 2780 int i, r; 2781 2782 if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)) 2783 return; 2784 2785 for (i = 0; i < adev->num_ip_blocks; i++) { 2786 if (!adev->ip_blocks[i].status.hw) 2787 continue; 2788 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 2789 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); 2790 /* XXX handle errors */ 2791 if (r) { 2792 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", 2793 adev->ip_blocks[i].version->funcs->name, r); 2794 } 2795 adev->ip_blocks[i].status.hw = false; 2796 break; 2797 } 2798 } 2799 } 2800 2801 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) 2802 { 2803 int i, r; 2804 2805 for (i = 0; i < adev->num_ip_blocks; i++) { 2806 if (!adev->ip_blocks[i].version->funcs->early_fini) 2807 continue; 2808 2809 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev); 2810 if (r) { 2811 DRM_DEBUG("early_fini of IP block <%s> failed %d\n", 2812 adev->ip_blocks[i].version->funcs->name, r); 2813 } 2814 } 2815 2816 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 2817 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 2818 2819 amdgpu_amdkfd_suspend(adev, false); 2820 2821 /* Workaround for ASICs that need to disable the SMC first */ 2822 amdgpu_device_smu_fini_early(adev); 2823 2824 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2825 if (!adev->ip_blocks[i].status.hw) 2826 continue; 2827 2828 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); 2829 /* XXX handle errors */ 2830 if (r) { 2831 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", 2832 adev->ip_blocks[i].version->funcs->name, r); 2833 } 2834 2835 adev->ip_blocks[i].status.hw = false; 2836 } 2837 2838 if (amdgpu_sriov_vf(adev)) { 2839 if (amdgpu_virt_release_full_gpu(adev, false)) 2840 DRM_ERROR("failed to release exclusive mode on fini\n"); 2841 } 2842 2843 return 0; 2844 } 2845 2846 /** 2847 * amdgpu_device_ip_fini - run fini for hardware IPs 2848 * 2849 * @adev: amdgpu_device pointer 2850 * 2851 * Main teardown pass for hardware IPs. The list of all the hardware 2852 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks 2853 * are run. hw_fini tears down the hardware associated with each IP 2854 * and sw_fini tears down any software state associated with each IP. 2855 * Returns 0 on success, negative error code on failure.
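 * The blocks are torn down in the reverse of their init order: sw_fini runs from the last block to the first, followed by a second reverse pass for late_fini.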
2856 */ 2857 static int amdgpu_device_ip_fini(struct amdgpu_device *adev) 2858 { 2859 int i, r; 2860 2861 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done) 2862 amdgpu_virt_release_ras_err_handler_data(adev); 2863 2864 if (adev->gmc.xgmi.num_physical_nodes > 1) 2865 amdgpu_xgmi_remove_device(adev); 2866 2867 amdgpu_amdkfd_device_fini_sw(adev); 2868 2869 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2870 if (!adev->ip_blocks[i].status.sw) 2871 continue; 2872 2873 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 2874 amdgpu_ucode_free_bo(adev); 2875 amdgpu_free_static_csa(&adev->virt.csa_obj); 2876 amdgpu_device_wb_fini(adev); 2877 amdgpu_device_vram_scratch_fini(adev); 2878 amdgpu_ib_pool_fini(adev); 2879 } 2880 2881 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); 2882 /* XXX handle errors */ 2883 if (r) { 2884 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", 2885 adev->ip_blocks[i].version->funcs->name, r); 2886 } 2887 adev->ip_blocks[i].status.sw = false; 2888 adev->ip_blocks[i].status.valid = false; 2889 } 2890 2891 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2892 if (!adev->ip_blocks[i].status.late_initialized) 2893 continue; 2894 if (adev->ip_blocks[i].version->funcs->late_fini) 2895 adev->ip_blocks[i].version->funcs->late_fini((void *)adev); 2896 adev->ip_blocks[i].status.late_initialized = false; 2897 } 2898 2899 amdgpu_ras_fini(adev); 2900 2901 return 0; 2902 } 2903 2904 /** 2905 * amdgpu_device_delayed_init_work_handler - work handler for IB tests 2906 * 2907 * @work: work_struct. 2908 */ 2909 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work) 2910 { 2911 struct amdgpu_device *adev = 2912 container_of(work, struct amdgpu_device, delayed_init_work.work); 2913 int r; 2914 2915 r = amdgpu_ib_ring_tests(adev); 2916 if (r) 2917 DRM_ERROR("ib ring test failed (%d).\n", r); 2918 } 2919 2920 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) 2921 { 2922 struct amdgpu_device *adev = 2923 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work); 2924 2925 WARN_ON_ONCE(adev->gfx.gfx_off_state); 2926 WARN_ON_ONCE(adev->gfx.gfx_off_req_count); 2927 2928 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true)) 2929 adev->gfx.gfx_off_state = true; 2930 } 2931 2932 /** 2933 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1) 2934 * 2935 * @adev: amdgpu_device pointer 2936 * 2937 * Main suspend function for hardware IPs. The list of all the hardware 2938 * IPs that make up the asic is walked, clockgating is disabled and the 2939 * suspend callbacks are run. suspend puts the hardware and software state 2940 * in each IP into a state suitable for suspend. 2941 * Returns 0 on success, negative error code on failure. 2942 */ 2943 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) 2944 { 2945 int i, r; 2946 2947 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 2948 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 2949 2950 /* 2951 * Per PMFW team's suggestion, driver needs to handle gfxoff 2952 * and df cstate features disablement for gpu reset(e.g. Mode1Reset) 2953 * scenario. Add the missing df cstate disablement here. 
2954 */ 2955 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) 2956 dev_warn(adev->dev, "Failed to disallow df cstate"); 2957 2958 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2959 if (!adev->ip_blocks[i].status.valid) 2960 continue; 2961 2962 /* displays are handled separately */ 2963 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE) 2964 continue; 2965 2966 /* XXX handle errors */ 2967 r = adev->ip_blocks[i].version->funcs->suspend(adev); 2968 /* XXX handle errors */ 2969 if (r) { 2970 DRM_ERROR("suspend of IP block <%s> failed %d\n", 2971 adev->ip_blocks[i].version->funcs->name, r); 2972 return r; 2973 } 2974 2975 adev->ip_blocks[i].status.hw = false; 2976 } 2977 2978 return 0; 2979 } 2980 2981 /** 2982 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2) 2983 * 2984 * @adev: amdgpu_device pointer 2985 * 2986 * Main suspend function for hardware IPs. The list of all the hardware 2987 * IPs that make up the asic is walked, clockgating is disabled and the 2988 * suspend callbacks are run. suspend puts the hardware and software state 2989 * in each IP into a state suitable for suspend. 2990 * Returns 0 on success, negative error code on failure. 2991 */ 2992 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) 2993 { 2994 int i, r; 2995 2996 if (adev->in_s0ix) 2997 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry); 2998 2999 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 3000 if (!adev->ip_blocks[i].status.valid) 3001 continue; 3002 /* displays are handled in phase1 */ 3003 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) 3004 continue; 3005 /* PSP lost connection when err_event_athub occurs */ 3006 if (amdgpu_ras_intr_triggered() && 3007 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 3008 adev->ip_blocks[i].status.hw = false; 3009 continue; 3010 } 3011 3012 /* skip unnecessary suspend if we do not initialize them yet */ 3013 if (adev->gmc.xgmi.pending_reset && 3014 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 3015 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC || 3016 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 3017 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) { 3018 adev->ip_blocks[i].status.hw = false; 3019 continue; 3020 } 3021 3022 /* skip suspend of gfx/mes and psp for S0ix 3023 * gfx is in gfxoff state, so on resume it will exit gfxoff just 3024 * like at runtime. PSP is also part of the always on hardware 3025 * so no need to suspend it. 
3026 */ 3027 if (adev->in_s0ix && 3028 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP || 3029 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || 3030 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES)) 3031 continue; 3032 3033 /* XXX handle errors */ 3034 r = adev->ip_blocks[i].version->funcs->suspend(adev); 3035 /* XXX handle errors */ 3036 if (r) { 3037 DRM_ERROR("suspend of IP block <%s> failed %d\n", 3038 adev->ip_blocks[i].version->funcs->name, r); 3039 } 3040 adev->ip_blocks[i].status.hw = false; 3041 /* handle putting the SMC in the appropriate state */ 3042 if(!amdgpu_sriov_vf(adev)){ 3043 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 3044 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); 3045 if (r) { 3046 DRM_ERROR("SMC failed to set mp1 state %d, %d\n", 3047 adev->mp1_state, r); 3048 return r; 3049 } 3050 } 3051 } 3052 } 3053 3054 return 0; 3055 } 3056 3057 /** 3058 * amdgpu_device_ip_suspend - run suspend for hardware IPs 3059 * 3060 * @adev: amdgpu_device pointer 3061 * 3062 * Main suspend function for hardware IPs. The list of all the hardware 3063 * IPs that make up the asic is walked, clockgating is disabled and the 3064 * suspend callbacks are run. suspend puts the hardware and software state 3065 * in each IP into a state suitable for suspend. 3066 * Returns 0 on success, negative error code on failure. 3067 */ 3068 int amdgpu_device_ip_suspend(struct amdgpu_device *adev) 3069 { 3070 int r; 3071 3072 if (amdgpu_sriov_vf(adev)) { 3073 amdgpu_virt_fini_data_exchange(adev); 3074 amdgpu_virt_request_full_gpu(adev, false); 3075 } 3076 3077 r = amdgpu_device_ip_suspend_phase1(adev); 3078 if (r) 3079 return r; 3080 r = amdgpu_device_ip_suspend_phase2(adev); 3081 3082 if (amdgpu_sriov_vf(adev)) 3083 amdgpu_virt_release_full_gpu(adev, false); 3084 3085 return r; 3086 } 3087 3088 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) 3089 { 3090 int i, r; 3091 3092 static enum amd_ip_block_type ip_order[] = { 3093 AMD_IP_BLOCK_TYPE_COMMON, 3094 AMD_IP_BLOCK_TYPE_GMC, 3095 AMD_IP_BLOCK_TYPE_PSP, 3096 AMD_IP_BLOCK_TYPE_IH, 3097 }; 3098 3099 for (i = 0; i < adev->num_ip_blocks; i++) { 3100 int j; 3101 struct amdgpu_ip_block *block; 3102 3103 block = &adev->ip_blocks[i]; 3104 block->status.hw = false; 3105 3106 for (j = 0; j < ARRAY_SIZE(ip_order); j++) { 3107 3108 if (block->version->type != ip_order[j] || 3109 !block->status.valid) 3110 continue; 3111 3112 r = block->version->funcs->hw_init(adev); 3113 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); 3114 if (r) 3115 return r; 3116 block->status.hw = true; 3117 } 3118 } 3119 3120 return 0; 3121 } 3122 3123 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) 3124 { 3125 int i, r; 3126 3127 static enum amd_ip_block_type ip_order[] = { 3128 AMD_IP_BLOCK_TYPE_SMC, 3129 AMD_IP_BLOCK_TYPE_DCE, 3130 AMD_IP_BLOCK_TYPE_GFX, 3131 AMD_IP_BLOCK_TYPE_SDMA, 3132 AMD_IP_BLOCK_TYPE_UVD, 3133 AMD_IP_BLOCK_TYPE_VCE, 3134 AMD_IP_BLOCK_TYPE_VCN 3135 }; 3136 3137 for (i = 0; i < ARRAY_SIZE(ip_order); i++) { 3138 int j; 3139 struct amdgpu_ip_block *block; 3140 3141 for (j = 0; j < adev->num_ip_blocks; j++) { 3142 block = &adev->ip_blocks[j]; 3143 3144 if (block->version->type != ip_order[i] || 3145 !block->status.valid || 3146 block->status.hw) 3147 continue; 3148 3149 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) 3150 r = block->version->funcs->resume(adev); 3151 else 3152 r = block->version->funcs->hw_init(adev); 
3153 3154 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); 3155 if (r) 3156 return r; 3157 block->status.hw = true; 3158 } 3159 } 3160 3161 return 0; 3162 } 3163 3164 /** 3165 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs 3166 * 3167 * @adev: amdgpu_device pointer 3168 * 3169 * First resume function for hardware IPs. The list of all the hardware 3170 * IPs that make up the asic is walked and the resume callbacks are run for 3171 * COMMON, GMC, and IH. resume puts the hardware into a functional state 3172 * after a suspend and updates the software state as necessary. This 3173 * function is also used for restoring the GPU after a GPU reset. 3174 * Returns 0 on success, negative error code on failure. 3175 */ 3176 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) 3177 { 3178 int i, r; 3179 3180 for (i = 0; i < adev->num_ip_blocks; i++) { 3181 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) 3182 continue; 3183 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 3184 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 3185 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || 3186 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) { 3187 3188 r = adev->ip_blocks[i].version->funcs->resume(adev); 3189 if (r) { 3190 DRM_ERROR("resume of IP block <%s> failed %d\n", 3191 adev->ip_blocks[i].version->funcs->name, r); 3192 return r; 3193 } 3194 adev->ip_blocks[i].status.hw = true; 3195 } 3196 } 3197 3198 return 0; 3199 } 3200 3201 /** 3202 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs 3203 * 3204 * @adev: amdgpu_device pointer 3205 * 3206 * First resume function for hardware IPs. The list of all the hardware 3207 * IPs that make up the asic is walked and the resume callbacks are run for 3208 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a 3209 * functional state after a suspend and updates the software state as 3210 * necessary. This function is also used for restoring the GPU after a GPU 3211 * reset. 3212 * Returns 0 on success, negative error code on failure. 3213 */ 3214 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) 3215 { 3216 int i, r; 3217 3218 for (i = 0; i < adev->num_ip_blocks; i++) { 3219 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) 3220 continue; 3221 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 3222 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 3223 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || 3224 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) 3225 continue; 3226 r = adev->ip_blocks[i].version->funcs->resume(adev); 3227 if (r) { 3228 DRM_ERROR("resume of IP block <%s> failed %d\n", 3229 adev->ip_blocks[i].version->funcs->name, r); 3230 return r; 3231 } 3232 adev->ip_blocks[i].status.hw = true; 3233 3234 if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 3235 /* disable gfxoff for IP resume. The gfxoff will be re-enabled in 3236 * amdgpu_device_resume() after IP resume. 3237 */ 3238 amdgpu_gfx_off_ctrl(adev, false); 3239 DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n"); 3240 } 3241 3242 } 3243 3244 return 0; 3245 } 3246 3247 /** 3248 * amdgpu_device_ip_resume - run resume for hardware IPs 3249 * 3250 * @adev: amdgpu_device pointer 3251 * 3252 * Main resume function for hardware IPs. 
The hardware IPs 3253 * are split into two resume functions because they are 3254 * also used in recovering from a GPU reset and some additional 3255 * steps need to be taken between them. In this case (S3/S4) they are 3256 * run sequentially. 3257 * Returns 0 on success, negative error code on failure. 3258 */ 3259 static int amdgpu_device_ip_resume(struct amdgpu_device *adev) 3260 { 3261 int r; 3262 3263 r = amdgpu_amdkfd_resume_iommu(adev); 3264 if (r) 3265 return r; 3266 3267 r = amdgpu_device_ip_resume_phase1(adev); 3268 if (r) 3269 return r; 3270 3271 r = amdgpu_device_fw_loading(adev); 3272 if (r) 3273 return r; 3274 3275 r = amdgpu_device_ip_resume_phase2(adev); 3276 3277 return r; 3278 } 3279 3280 /** 3281 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV 3282 * 3283 * @adev: amdgpu_device pointer 3284 * 3285 * Query the VBIOS data tables to determine if the board supports SR-IOV. 3286 */ 3287 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) 3288 { 3289 if (amdgpu_sriov_vf(adev)) { 3290 if (adev->is_atom_fw) { 3291 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev)) 3292 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; 3293 } else { 3294 if (amdgpu_atombios_has_gpu_virtualization_table(adev)) 3295 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; 3296 } 3297 3298 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)) 3299 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0); 3300 } 3301 } 3302 3303 /** 3304 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic 3305 * 3306 * @asic_type: AMD asic type 3307 * 3308 * Check if there is DC (new modesetting infrastructure) support for an asic. 3309 * returns true if DC has support, false if not. 3310 */ 3311 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) 3312 { 3313 switch (asic_type) { 3314 #ifdef CONFIG_DRM_AMDGPU_SI 3315 case CHIP_HAINAN: 3316 #endif 3317 case CHIP_TOPAZ: 3318 /* chips with no display hardware */ 3319 return false; 3320 #if defined(CONFIG_DRM_AMD_DC) 3321 case CHIP_TAHITI: 3322 case CHIP_PITCAIRN: 3323 case CHIP_VERDE: 3324 case CHIP_OLAND: 3325 /* 3326 * We have systems in the wild with these ASICs that require 3327 * LVDS and VGA support which is not supported with DC. 3328 * 3329 * Fall back to the non-DC driver here by default so as not to 3330 * cause regressions. 3331 */ 3332 #if defined(CONFIG_DRM_AMD_DC_SI) 3333 return amdgpu_dc > 0; 3334 #else 3335 return false; 3336 #endif 3337 case CHIP_BONAIRE: 3338 case CHIP_KAVERI: 3339 case CHIP_KABINI: 3340 case CHIP_MULLINS: 3341 /* 3342 * We have systems in the wild with these ASICs that require 3343 * VGA support which is not supported with DC. 3344 * 3345 * Fall back to the non-DC driver here by default so as not to 3346 * cause regressions.
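 * DC can still be forced on for these chips by passing amdgpu.dc=1 on the kernel command line; left at its default, they use the legacy display path.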
3347 */ 3348 return amdgpu_dc > 0; 3349 default: 3350 return amdgpu_dc != 0; 3351 #else 3352 default: 3353 if (amdgpu_dc > 0) 3354 DRM_INFO_ONCE("Display Core has been requested via kernel parameter " 3355 "but isn't supported by ASIC, ignoring\n"); 3356 return false; 3357 #endif 3358 } 3359 } 3360 3361 /** 3362 * amdgpu_device_has_dc_support - check if dc is supported 3363 * 3364 * @adev: amdgpu_device pointer 3365 * 3366 * Returns true for supported, false for not supported 3367 */ 3368 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) 3369 { 3370 if (adev->enable_virtual_display || 3371 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) 3372 return false; 3373 3374 return amdgpu_device_asic_has_dc_support(adev->asic_type); 3375 } 3376 3377 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) 3378 { 3379 struct amdgpu_device *adev = 3380 container_of(__work, struct amdgpu_device, xgmi_reset_work); 3381 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 3382 3383 /* It's a bug to not have a hive within this function */ 3384 if (WARN_ON(!hive)) 3385 return; 3386 3387 /* 3388 * Use task barrier to synchronize all xgmi reset works across the 3389 * hive. task_barrier_enter and task_barrier_exit will block 3390 * until all the threads running the xgmi reset works reach 3391 * those points. task_barrier_full will do both blocks. 3392 */ 3393 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { 3394 3395 task_barrier_enter(&hive->tb); 3396 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev)); 3397 3398 if (adev->asic_reset_res) 3399 goto fail; 3400 3401 task_barrier_exit(&hive->tb); 3402 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev)); 3403 3404 if (adev->asic_reset_res) 3405 goto fail; 3406 3407 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops && 3408 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count) 3409 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev); 3410 } else { 3411 3412 task_barrier_full(&hive->tb); 3413 adev->asic_reset_res = amdgpu_asic_reset(adev); 3414 } 3415 3416 fail: 3417 if (adev->asic_reset_res) 3418 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s", 3419 adev->asic_reset_res, adev_to_drm(adev)->unique); 3420 amdgpu_put_xgmi_hive(hive); 3421 } 3422 3423 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) 3424 { 3425 char *input = amdgpu_lockup_timeout; 3426 char *timeout_setting = NULL; 3427 int index = 0; 3428 long timeout; 3429 int ret = 0; 3430 3431 /* 3432 * By default timeout for non compute jobs is 10000 3433 * and 60000 for compute jobs. 3434 * In SR-IOV or passthrough mode, timeout for compute 3435 * jobs are 60000 by default. 3436 */ 3437 adev->gfx_timeout = msecs_to_jiffies(10000); 3438 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; 3439 if (amdgpu_sriov_vf(adev)) 3440 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ? 
3441 msecs_to_jiffies(60000) : msecs_to_jiffies(10000); 3442 else 3443 adev->compute_timeout = msecs_to_jiffies(60000); 3444 3445 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { 3446 while ((timeout_setting = strsep(&input, ",")) && 3447 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { 3448 ret = kstrtol(timeout_setting, 0, &timeout); 3449 if (ret) 3450 return ret; 3451 3452 if (timeout == 0) { 3453 index++; 3454 continue; 3455 } else if (timeout < 0) { 3456 timeout = MAX_SCHEDULE_TIMEOUT; 3457 dev_warn(adev->dev, "lockup timeout disabled"); 3458 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); 3459 } else { 3460 timeout = msecs_to_jiffies(timeout); 3461 } 3462 3463 switch (index++) { 3464 case 0: 3465 adev->gfx_timeout = timeout; 3466 break; 3467 case 1: 3468 adev->compute_timeout = timeout; 3469 break; 3470 case 2: 3471 adev->sdma_timeout = timeout; 3472 break; 3473 case 3: 3474 adev->video_timeout = timeout; 3475 break; 3476 default: 3477 break; 3478 } 3479 } 3480 /* 3481 * There is only one value specified and 3482 * it should apply to all non-compute jobs. 3483 */ 3484 if (index == 1) { 3485 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; 3486 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) 3487 adev->compute_timeout = adev->gfx_timeout; 3488 } 3489 } 3490 3491 return ret; 3492 } 3493 3494 /** 3495 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU 3496 * 3497 * @adev: amdgpu_device pointer 3498 * 3499 * RAM direct mapped to GPU if IOMMU is not enabled or is pass through mode 3500 */ 3501 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev) 3502 { 3503 struct iommu_domain *domain; 3504 3505 domain = iommu_get_domain_for_dev(adev->dev); 3506 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY) 3507 adev->ram_is_direct_mapped = true; 3508 } 3509 3510 static const struct attribute *amdgpu_dev_attributes[] = { 3511 &dev_attr_product_name.attr, 3512 &dev_attr_product_number.attr, 3513 &dev_attr_serial_number.attr, 3514 &dev_attr_pcie_replay_count.attr, 3515 NULL 3516 }; 3517 3518 /** 3519 * amdgpu_device_init - initialize the driver 3520 * 3521 * @adev: amdgpu_device pointer 3522 * @flags: driver flags 3523 * 3524 * Initializes the driver info and hw (all asics). 3525 * Returns 0 for success or an error on failure. 3526 * Called at driver startup. 
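 * Very roughly, the sequence below is: map MMIO, create the reset domain, run early IP init, post the vBIOS if needed, initialize the IP blocks and fence driver, then register the sysfs/PM interfaces and schedule the delayed late-init work.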
3527 */ 3528 int amdgpu_device_init(struct amdgpu_device *adev, 3529 uint32_t flags) 3530 { 3531 struct drm_device *ddev = adev_to_drm(adev); 3532 struct pci_dev *pdev = adev->pdev; 3533 int r, i; 3534 bool px = false; 3535 u32 max_MBps; 3536 3537 adev->shutdown = false; 3538 adev->flags = flags; 3539 3540 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST) 3541 adev->asic_type = amdgpu_force_asic_type; 3542 else 3543 adev->asic_type = flags & AMD_ASIC_MASK; 3544 3545 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; 3546 if (amdgpu_emu_mode == 1) 3547 adev->usec_timeout *= 10; 3548 adev->gmc.gart_size = 512 * 1024 * 1024; 3549 adev->accel_working = false; 3550 adev->num_rings = 0; 3551 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub()); 3552 adev->mman.buffer_funcs = NULL; 3553 adev->mman.buffer_funcs_ring = NULL; 3554 adev->vm_manager.vm_pte_funcs = NULL; 3555 adev->vm_manager.vm_pte_num_scheds = 0; 3556 adev->gmc.gmc_funcs = NULL; 3557 adev->harvest_ip_mask = 0x0; 3558 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); 3559 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 3560 3561 adev->smc_rreg = &amdgpu_invalid_rreg; 3562 adev->smc_wreg = &amdgpu_invalid_wreg; 3563 adev->pcie_rreg = &amdgpu_invalid_rreg; 3564 adev->pcie_wreg = &amdgpu_invalid_wreg; 3565 adev->pciep_rreg = &amdgpu_invalid_rreg; 3566 adev->pciep_wreg = &amdgpu_invalid_wreg; 3567 adev->pcie_rreg64 = &amdgpu_invalid_rreg64; 3568 adev->pcie_wreg64 = &amdgpu_invalid_wreg64; 3569 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; 3570 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; 3571 adev->didt_rreg = &amdgpu_invalid_rreg; 3572 adev->didt_wreg = &amdgpu_invalid_wreg; 3573 adev->gc_cac_rreg = &amdgpu_invalid_rreg; 3574 adev->gc_cac_wreg = &amdgpu_invalid_wreg; 3575 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; 3576 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; 3577 3578 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 3579 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, 3580 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 3581 3582 /* mutex initialization are all done here so we 3583 * can recall function without having locking issues */ 3584 mutex_init(&adev->firmware.mutex); 3585 mutex_init(&adev->pm.mutex); 3586 mutex_init(&adev->gfx.gpu_clock_mutex); 3587 mutex_init(&adev->srbm_mutex); 3588 mutex_init(&adev->gfx.pipe_reserve_mutex); 3589 mutex_init(&adev->gfx.gfx_off_mutex); 3590 mutex_init(&adev->grbm_idx_mutex); 3591 mutex_init(&adev->mn_lock); 3592 mutex_init(&adev->virt.vf_errors.lock); 3593 hash_init(adev->mn_hash); 3594 mutex_init(&adev->psp.mutex); 3595 mutex_init(&adev->notifier_lock); 3596 mutex_init(&adev->pm.stable_pstate_ctx_lock); 3597 mutex_init(&adev->benchmark_mutex); 3598 3599 amdgpu_device_init_apu_flags(adev); 3600 3601 r = amdgpu_device_check_arguments(adev); 3602 if (r) 3603 return r; 3604 3605 spin_lock_init(&adev->mmio_idx_lock); 3606 spin_lock_init(&adev->smc_idx_lock); 3607 spin_lock_init(&adev->pcie_idx_lock); 3608 spin_lock_init(&adev->uvd_ctx_idx_lock); 3609 spin_lock_init(&adev->didt_idx_lock); 3610 spin_lock_init(&adev->gc_cac_idx_lock); 3611 spin_lock_init(&adev->se_cac_idx_lock); 3612 spin_lock_init(&adev->audio_endpt_idx_lock); 3613 spin_lock_init(&adev->mm_stats.lock); 3614 3615 INIT_LIST_HEAD(&adev->shadow_list); 3616 mutex_init(&adev->shadow_list_lock); 3617 3618 INIT_LIST_HEAD(&adev->reset_list); 3619 3620 INIT_LIST_HEAD(&adev->ras_list); 3621 3622 
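/* Deferred work items: delayed_init_work runs the IB ring tests once init has settled, and gfx_off_delay_work delays enabling GFXOFF; both handlers are defined earlier in this file. */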
INIT_DELAYED_WORK(&adev->delayed_init_work, 3623 amdgpu_device_delayed_init_work_handler); 3624 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, 3625 amdgpu_device_delay_enable_gfx_off); 3626 3627 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); 3628 3629 adev->gfx.gfx_off_req_count = 1; 3630 adev->gfx.gfx_off_residency = 0; 3631 adev->gfx.gfx_off_entrycount = 0; 3632 adev->pm.ac_power = power_supply_is_system_supplied() > 0; 3633 3634 atomic_set(&adev->throttling_logging_enabled, 1); 3635 /* 3636 * If throttling continues, logging will be performed every minute 3637 * to avoid log flooding. "-1" is subtracted since the thermal 3638 * throttling interrupt comes every second. Thus, the total logging 3639 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting 3640 * for throttling interrupt) = 60 seconds. 3641 */ 3642 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1); 3643 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE); 3644 3645 /* Registers mapping */ 3646 /* TODO: block userspace mapping of io register */ 3647 if (adev->asic_type >= CHIP_BONAIRE) { 3648 adev->rmmio_base = pci_resource_start(adev->pdev, 5); 3649 adev->rmmio_size = pci_resource_len(adev->pdev, 5); 3650 } else { 3651 adev->rmmio_base = pci_resource_start(adev->pdev, 2); 3652 adev->rmmio_size = pci_resource_len(adev->pdev, 2); 3653 } 3654 3655 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++) 3656 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN); 3657 3658 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); 3659 if (adev->rmmio == NULL) { 3660 return -ENOMEM; 3661 } 3662 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); 3663 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); 3664 3665 amdgpu_device_get_pcie_info(adev); 3666 3667 if (amdgpu_mcbp) 3668 DRM_INFO("MCBP is enabled\n"); 3669 3670 /* 3671 * The reset domain needs to be present early, before the XGMI hive is 3672 * discovered (if any) and initialized, to use the reset sem and in_gpu reset flag 3673 * early on during init and before calling RREG32.
3674 */ 3675 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev"); 3676 if (!adev->reset_domain) 3677 return -ENOMEM; 3678 3679 /* detect hw virtualization here */ 3680 amdgpu_detect_virtualization(adev); 3681 3682 r = amdgpu_device_get_job_timeout_settings(adev); 3683 if (r) { 3684 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); 3685 return r; 3686 } 3687 3688 /* early init functions */ 3689 r = amdgpu_device_ip_early_init(adev); 3690 if (r) 3691 return r; 3692 3693 /* Get rid of things like offb */ 3694 r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver); 3695 if (r) 3696 return r; 3697 3698 /* Enable TMZ based on IP_VERSION */ 3699 amdgpu_gmc_tmz_set(adev); 3700 3701 amdgpu_gmc_noretry_set(adev); 3702 /* Need to get xgmi info early to decide the reset behavior*/ 3703 if (adev->gmc.xgmi.supported) { 3704 r = adev->gfxhub.funcs->get_xgmi_info(adev); 3705 if (r) 3706 return r; 3707 } 3708 3709 /* enable PCIE atomic ops */ 3710 if (amdgpu_sriov_vf(adev)) 3711 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *) 3712 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags == 3713 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64); 3714 else 3715 adev->have_atomics_support = 3716 !pci_enable_atomic_ops_to_root(adev->pdev, 3717 PCI_EXP_DEVCAP2_ATOMIC_COMP32 | 3718 PCI_EXP_DEVCAP2_ATOMIC_COMP64); 3719 if (!adev->have_atomics_support) 3720 dev_info(adev->dev, "PCIE atomic ops is not supported\n"); 3721 3722 /* doorbell bar mapping and doorbell index init*/ 3723 amdgpu_device_doorbell_init(adev); 3724 3725 if (amdgpu_emu_mode == 1) { 3726 /* post the asic on emulation mode */ 3727 emu_soc_asic_init(adev); 3728 goto fence_driver_init; 3729 } 3730 3731 amdgpu_reset_init(adev); 3732 3733 /* detect if we are with an SRIOV vbios */ 3734 amdgpu_device_detect_sriov_bios(adev); 3735 3736 /* check if we need to reset the asic 3737 * E.g., driver was not cleanly unloaded previously, etc. 
3738 */ 3739 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) { 3740 if (adev->gmc.xgmi.num_physical_nodes) { 3741 dev_info(adev->dev, "Pending hive reset.\n"); 3742 adev->gmc.xgmi.pending_reset = true; 3743 /* Only need to init necessary block for SMU to handle the reset */ 3744 for (i = 0; i < adev->num_ip_blocks; i++) { 3745 if (!adev->ip_blocks[i].status.valid) 3746 continue; 3747 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 3748 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 3749 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || 3750 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) { 3751 DRM_DEBUG("IP %s disabled for hw_init.\n", 3752 adev->ip_blocks[i].version->funcs->name); 3753 adev->ip_blocks[i].status.hw = true; 3754 } 3755 } 3756 } else { 3757 r = amdgpu_asic_reset(adev); 3758 if (r) { 3759 dev_err(adev->dev, "asic reset on init failed\n"); 3760 goto failed; 3761 } 3762 } 3763 } 3764 3765 pci_enable_pcie_error_reporting(adev->pdev); 3766 3767 /* Post card if necessary */ 3768 if (amdgpu_device_need_post(adev)) { 3769 if (!adev->bios) { 3770 dev_err(adev->dev, "no vBIOS found\n"); 3771 r = -EINVAL; 3772 goto failed; 3773 } 3774 DRM_INFO("GPU posting now...\n"); 3775 r = amdgpu_device_asic_init(adev); 3776 if (r) { 3777 dev_err(adev->dev, "gpu post error!\n"); 3778 goto failed; 3779 } 3780 } 3781 3782 if (adev->is_atom_fw) { 3783 /* Initialize clocks */ 3784 r = amdgpu_atomfirmware_get_clock_info(adev); 3785 if (r) { 3786 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); 3787 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 3788 goto failed; 3789 } 3790 } else { 3791 /* Initialize clocks */ 3792 r = amdgpu_atombios_get_clock_info(adev); 3793 if (r) { 3794 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); 3795 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 3796 goto failed; 3797 } 3798 /* init i2c buses */ 3799 if (!amdgpu_device_has_dc_support(adev)) 3800 amdgpu_atombios_i2c_init(adev); 3801 } 3802 3803 fence_driver_init: 3804 /* Fence driver */ 3805 r = amdgpu_fence_driver_sw_init(adev); 3806 if (r) { 3807 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n"); 3808 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); 3809 goto failed; 3810 } 3811 3812 /* init the mode config */ 3813 drm_mode_config_init(adev_to_drm(adev)); 3814 3815 r = amdgpu_device_ip_init(adev); 3816 if (r) { 3817 /* failed in exclusive mode due to timeout */ 3818 if (amdgpu_sriov_vf(adev) && 3819 !amdgpu_sriov_runtime(adev) && 3820 amdgpu_virt_mmio_blocked(adev) && 3821 !amdgpu_virt_wait_reset(adev)) { 3822 dev_err(adev->dev, "VF exclusive mode timeout\n"); 3823 /* Don't send request since VF is inactive. */ 3824 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; 3825 adev->virt.ops = NULL; 3826 r = -EAGAIN; 3827 goto release_ras_con; 3828 } 3829 dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); 3830 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); 3831 goto release_ras_con; 3832 } 3833 3834 amdgpu_fence_driver_hw_init(adev); 3835 3836 dev_info(adev->dev, 3837 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n", 3838 adev->gfx.config.max_shader_engines, 3839 adev->gfx.config.max_sh_per_se, 3840 adev->gfx.config.max_cu_per_sh, 3841 adev->gfx.cu_info.number); 3842 3843 adev->accel_working = true; 3844 3845 amdgpu_vm_check_compute_bug(adev); 3846 3847 /* Initialize the buffer migration limit. 
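 * The value is kept as log2(MB/s) in mm_stats.log2_max_MBps so that later throttling math can use shifts instead of divisions.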
*/ 3848 if (amdgpu_moverate >= 0) 3849 max_MBps = amdgpu_moverate; 3850 else 3851 max_MBps = 8; /* Allow 8 MB/s. */ 3852 /* Get a log2 for easy divisions. */ 3853 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); 3854 3855 r = amdgpu_pm_sysfs_init(adev); 3856 if (r) { 3857 adev->pm_sysfs_en = false; 3858 DRM_ERROR("registering pm sysfs failed (%d).\n", r); 3859 } else 3860 adev->pm_sysfs_en = true; 3861 3862 r = amdgpu_ucode_sysfs_init(adev); 3863 if (r) { 3864 adev->ucode_sysfs_en = false; 3865 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r); 3866 } else 3867 adev->ucode_sysfs_en = true; 3868 3869 r = amdgpu_psp_sysfs_init(adev); 3870 if (r) { 3871 adev->psp_sysfs_en = false; 3872 if (!amdgpu_sriov_vf(adev)) 3873 DRM_ERROR("Creating psp sysfs failed\n"); 3874 } else 3875 adev->psp_sysfs_en = true; 3876 3877 /* 3878 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost. 3879 * Otherwise the mgpu fan boost feature will be skipped because the 3880 * gpu instance count would be too low. 3881 */ 3882 amdgpu_register_gpu_instance(adev); 3883 3884 /* enable clockgating, etc. after ib tests, etc. since some blocks require 3885 * explicit gating rather than handling it automatically. 3886 */ 3887 if (!adev->gmc.xgmi.pending_reset) { 3888 r = amdgpu_device_ip_late_init(adev); 3889 if (r) { 3890 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n"); 3891 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); 3892 goto release_ras_con; 3893 } 3894 /* must succeed. */ 3895 amdgpu_ras_resume(adev); 3896 queue_delayed_work(system_wq, &adev->delayed_init_work, 3897 msecs_to_jiffies(AMDGPU_RESUME_MS)); 3898 } 3899 3900 if (amdgpu_sriov_vf(adev)) 3901 flush_delayed_work(&adev->delayed_init_work); 3902 3903 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes); 3904 if (r) 3905 dev_err(adev->dev, "Could not create amdgpu device attr\n"); 3906 3907 if (IS_ENABLED(CONFIG_PERF_EVENTS)) { 3908 r = amdgpu_pmu_init(adev); 3909 if (r) 3910 dev_err(adev->dev, "amdgpu_pmu_init failed\n"); 3911 } 3912 /* Have stored pci confspace at hand for restore in sudden PCI error */ 3913 if (amdgpu_device_cache_pci_state(adev->pdev)) 3914 pci_restore_state(pdev); 3915 3916 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ 3917 /* this will fail for cards that aren't VGA class devices, just 3918 * ignore it */ 3919 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) 3920 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode); 3921 3922 if (amdgpu_device_supports_px(ddev)) { 3923 px = true; 3924 vga_switcheroo_register_client(adev->pdev, 3925 &amdgpu_switcheroo_ops, px); 3926 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); 3927 } 3928 3929 if (adev->gmc.xgmi.pending_reset) 3930 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work, 3931 msecs_to_jiffies(AMDGPU_RESUME_MS)); 3932 3933 amdgpu_device_check_iommu_direct_map(adev); 3934 3935 return 0; 3936 3937 release_ras_con: 3938 amdgpu_release_ras_context(adev); 3939 3940 failed: 3941 amdgpu_vf_error_trans_all(adev); 3942 3943 return r; 3944 } 3945 3946 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev) 3947 { 3948 3949 /* Clear all CPU mappings pointing to this device */ 3950 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1); 3951 3952 /* Unmap all mapped bars - Doorbell, registers and VRAM */ 3953 amdgpu_device_doorbell_fini(adev); 3954 3955 iounmap(adev->rmmio); 3956 adev->rmmio = NULL; 3957 if (adev->mman.aper_base_kaddr) 3958
iounmap(adev->mman.aper_base_kaddr); 3959 adev->mman.aper_base_kaddr = NULL; 3960 3961 /* Memory manager related */ 3962 if (!adev->gmc.xgmi.connected_to_cpu) { 3963 arch_phys_wc_del(adev->gmc.vram_mtrr); 3964 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); 3965 } 3966 } 3967 3968 /** 3969 * amdgpu_device_fini_hw - tear down the driver 3970 * 3971 * @adev: amdgpu_device pointer 3972 * 3973 * Tear down the driver info (all asics). 3974 * Called at driver shutdown. 3975 */ 3976 void amdgpu_device_fini_hw(struct amdgpu_device *adev) 3977 { 3978 dev_info(adev->dev, "amdgpu: finishing device.\n"); 3979 flush_delayed_work(&adev->delayed_init_work); 3980 adev->shutdown = true; 3981 3982 /* make sure IB test finished before entering exclusive mode 3983 * to avoid preemption on IB test 3984 * */ 3985 if (amdgpu_sriov_vf(adev)) { 3986 amdgpu_virt_request_full_gpu(adev, false); 3987 amdgpu_virt_fini_data_exchange(adev); 3988 } 3989 3990 /* disable all interrupts */ 3991 amdgpu_irq_disable_all(adev); 3992 if (adev->mode_info.mode_config_initialized){ 3993 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev))) 3994 drm_helper_force_disable_all(adev_to_drm(adev)); 3995 else 3996 drm_atomic_helper_shutdown(adev_to_drm(adev)); 3997 } 3998 amdgpu_fence_driver_hw_fini(adev); 3999 4000 if (adev->mman.initialized) { 4001 flush_delayed_work(&adev->mman.bdev.wq); 4002 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); 4003 } 4004 4005 if (adev->pm_sysfs_en) 4006 amdgpu_pm_sysfs_fini(adev); 4007 if (adev->ucode_sysfs_en) 4008 amdgpu_ucode_sysfs_fini(adev); 4009 if (adev->psp_sysfs_en) 4010 amdgpu_psp_sysfs_fini(adev); 4011 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); 4012 4013 /* disable ras feature must before hw fini */ 4014 amdgpu_ras_pre_fini(adev); 4015 4016 amdgpu_device_ip_fini_early(adev); 4017 4018 amdgpu_irq_fini_hw(adev); 4019 4020 if (adev->mman.initialized) 4021 ttm_device_clear_dma_mappings(&adev->mman.bdev); 4022 4023 amdgpu_gart_dummy_page_fini(adev); 4024 4025 amdgpu_device_unmap_mmio(adev); 4026 4027 } 4028 4029 void amdgpu_device_fini_sw(struct amdgpu_device *adev) 4030 { 4031 int idx; 4032 4033 amdgpu_fence_driver_sw_fini(adev); 4034 amdgpu_device_ip_fini(adev); 4035 release_firmware(adev->firmware.gpu_info_fw); 4036 adev->firmware.gpu_info_fw = NULL; 4037 adev->accel_working = false; 4038 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true)); 4039 4040 amdgpu_reset_fini(adev); 4041 4042 /* free i2c buses */ 4043 if (!amdgpu_device_has_dc_support(adev)) 4044 amdgpu_i2c_fini(adev); 4045 4046 if (amdgpu_emu_mode != 1) 4047 amdgpu_atombios_fini(adev); 4048 4049 kfree(adev->bios); 4050 adev->bios = NULL; 4051 if (amdgpu_device_supports_px(adev_to_drm(adev))) { 4052 vga_switcheroo_unregister_client(adev->pdev); 4053 vga_switcheroo_fini_domain_pm_ops(adev->dev); 4054 } 4055 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) 4056 vga_client_unregister(adev->pdev); 4057 4058 if (drm_dev_enter(adev_to_drm(adev), &idx)) { 4059 4060 iounmap(adev->rmmio); 4061 adev->rmmio = NULL; 4062 amdgpu_device_doorbell_fini(adev); 4063 drm_dev_exit(idx); 4064 } 4065 4066 if (IS_ENABLED(CONFIG_PERF_EVENTS)) 4067 amdgpu_pmu_fini(adev); 4068 if (adev->mman.discovery_bin) 4069 amdgpu_discovery_fini(adev); 4070 4071 amdgpu_reset_put_reset_domain(adev->reset_domain); 4072 adev->reset_domain = NULL; 4073 4074 kfree(adev->pci_state); 4075 4076 } 4077 4078 /** 4079 * amdgpu_device_evict_resources - evict device resources 4080 * @adev: amdgpu device object 4081 * 4082 * Evicts 
all ttm device resources(vram BOs, gart table) from the lru list 4083 * of the vram memory type. Mainly used for evicting device resources 4084 * at suspend time. 4085 * 4086 */ 4087 static int amdgpu_device_evict_resources(struct amdgpu_device *adev) 4088 { 4089 int ret; 4090 4091 /* No need to evict vram on APUs for suspend to ram or s2idle */ 4092 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU)) 4093 return 0; 4094 4095 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); 4096 if (ret) 4097 DRM_WARN("evicting device resources failed\n"); 4098 return ret; 4099 } 4100 4101 /* 4102 * Suspend & resume. 4103 */ 4104 /** 4105 * amdgpu_device_suspend - initiate device suspend 4106 * 4107 * @dev: drm dev pointer 4108 * @fbcon : notify the fbdev of suspend 4109 * 4110 * Puts the hw in the suspend state (all asics). 4111 * Returns 0 for success or an error on failure. 4112 * Called at driver suspend. 4113 */ 4114 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) 4115 { 4116 struct amdgpu_device *adev = drm_to_adev(dev); 4117 int r = 0; 4118 4119 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 4120 return 0; 4121 4122 adev->in_suspend = true; 4123 4124 /* Evict the majority of BOs before grabbing the full access */ 4125 r = amdgpu_device_evict_resources(adev); 4126 if (r) 4127 return r; 4128 4129 if (amdgpu_sriov_vf(adev)) { 4130 amdgpu_virt_fini_data_exchange(adev); 4131 r = amdgpu_virt_request_full_gpu(adev, false); 4132 if (r) 4133 return r; 4134 } 4135 4136 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3)) 4137 DRM_WARN("smart shift update failed\n"); 4138 4139 drm_kms_helper_poll_disable(dev); 4140 4141 if (fbcon) 4142 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); 4143 4144 cancel_delayed_work_sync(&adev->delayed_init_work); 4145 4146 amdgpu_ras_suspend(adev); 4147 4148 amdgpu_device_ip_suspend_phase1(adev); 4149 4150 if (!adev->in_s0ix) 4151 amdgpu_amdkfd_suspend(adev, adev->in_runpm); 4152 4153 r = amdgpu_device_evict_resources(adev); 4154 if (r) 4155 return r; 4156 4157 amdgpu_fence_driver_hw_fini(adev); 4158 4159 amdgpu_device_ip_suspend_phase2(adev); 4160 4161 if (amdgpu_sriov_vf(adev)) 4162 amdgpu_virt_release_full_gpu(adev, false); 4163 4164 return 0; 4165 } 4166 4167 /** 4168 * amdgpu_device_resume - initiate device resume 4169 * 4170 * @dev: drm dev pointer 4171 * @fbcon : notify the fbdev of resume 4172 * 4173 * Bring the hw back to operating state (all asics). 4174 * Returns 0 for success or an error on failure. 4175 * Called at driver resume. 
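 *
 * Resume ordering mirrors the code below: an SR-IOV VF first requests full
 * GPU access, the ASIC is re-posted if needed, the IP blocks and fence
 * driver are brought back, KFD is resumed unless this is an S0ix cycle, and
 * GFXOFF is re-enabled at the end for S0ix. A caller typically does
 * something like amdgpu_device_resume(dev, true); the usual call sites are
 * the driver's PM callbacks (sketch; see amdgpu_drv.c).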
4176 */ 4177 int amdgpu_device_resume(struct drm_device *dev, bool fbcon) 4178 { 4179 struct amdgpu_device *adev = drm_to_adev(dev); 4180 int r = 0; 4181 4182 if (amdgpu_sriov_vf(adev)) { 4183 r = amdgpu_virt_request_full_gpu(adev, true); 4184 if (r) 4185 return r; 4186 } 4187 4188 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 4189 return 0; 4190 4191 if (adev->in_s0ix) 4192 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry); 4193 4194 /* post card */ 4195 if (amdgpu_device_need_post(adev)) { 4196 r = amdgpu_device_asic_init(adev); 4197 if (r) 4198 dev_err(adev->dev, "amdgpu asic init failed\n"); 4199 } 4200 4201 r = amdgpu_device_ip_resume(adev); 4202 4203 if (r) { 4204 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r); 4205 goto exit; 4206 } 4207 amdgpu_fence_driver_hw_init(adev); 4208 4209 r = amdgpu_device_ip_late_init(adev); 4210 if (r) 4211 goto exit; 4212 4213 queue_delayed_work(system_wq, &adev->delayed_init_work, 4214 msecs_to_jiffies(AMDGPU_RESUME_MS)); 4215 4216 if (!adev->in_s0ix) { 4217 r = amdgpu_amdkfd_resume(adev, adev->in_runpm); 4218 if (r) 4219 goto exit; 4220 } 4221 4222 exit: 4223 if (amdgpu_sriov_vf(adev)) { 4224 amdgpu_virt_init_data_exchange(adev); 4225 amdgpu_virt_release_full_gpu(adev, true); 4226 } 4227 4228 if (r) 4229 return r; 4230 4231 /* Make sure IB tests flushed */ 4232 flush_delayed_work(&adev->delayed_init_work); 4233 4234 if (adev->in_s0ix) { 4235 /* re-enable gfxoff after IP resume. This re-enables gfxoff after 4236 * it was disabled for IP resume in amdgpu_device_ip_resume_phase2(). 4237 */ 4238 amdgpu_gfx_off_ctrl(adev, true); 4239 DRM_DEBUG("will enable gfxoff for the mission mode\n"); 4240 } 4241 if (fbcon) 4242 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false); 4243 4244 drm_kms_helper_poll_enable(dev); 4245 4246 amdgpu_ras_resume(adev); 4247 4248 if (adev->mode_info.num_crtc) { 4249 /* 4250 * Most of the connector probing functions try to acquire runtime pm 4251 * refs to ensure that the GPU is powered on when connector polling is 4252 * performed. Since we're calling this from a runtime PM callback, 4253 * trying to acquire rpm refs will cause us to deadlock. 4254 * 4255 * Since we're guaranteed to be holding the rpm lock, it's safe to 4256 * temporarily disable the rpm helpers so this doesn't deadlock us. 4257 */ 4258 #ifdef CONFIG_PM 4259 dev->dev->power.disable_depth++; 4260 #endif 4261 if (!adev->dc_enabled) 4262 drm_helper_hpd_irq_event(dev); 4263 else 4264 drm_kms_helper_hotplug_event(dev); 4265 #ifdef CONFIG_PM 4266 dev->dev->power.disable_depth--; 4267 #endif 4268 } 4269 adev->in_suspend = false; 4270 4271 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0)) 4272 DRM_WARN("smart shift update failed\n"); 4273 4274 return 0; 4275 } 4276 4277 /** 4278 * amdgpu_device_ip_check_soft_reset - did soft reset succeed 4279 * 4280 * @adev: amdgpu_device pointer 4281 * 4282 * The list of all the hardware IPs that make up the asic is walked and 4283 * the check_soft_reset callbacks are run. check_soft_reset determines 4284 * if the asic is still hung or not. 4285 * Returns true if any of the IPs are still in a hung state, false if not. 
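 *
 * SR-IOV VFs and ASICs that report they need a full reset are treated as
 * hung without consulting the per-IP callbacks.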
4286 */ 4287 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) 4288 { 4289 int i; 4290 bool asic_hang = false; 4291 4292 if (amdgpu_sriov_vf(adev)) 4293 return true; 4294 4295 if (amdgpu_asic_need_full_reset(adev)) 4296 return true; 4297 4298 for (i = 0; i < adev->num_ip_blocks; i++) { 4299 if (!adev->ip_blocks[i].status.valid) 4300 continue; 4301 if (adev->ip_blocks[i].version->funcs->check_soft_reset) 4302 adev->ip_blocks[i].status.hang = 4303 adev->ip_blocks[i].version->funcs->check_soft_reset(adev); 4304 if (adev->ip_blocks[i].status.hang) { 4305 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); 4306 asic_hang = true; 4307 } 4308 } 4309 return asic_hang; 4310 } 4311 4312 /** 4313 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset 4314 * 4315 * @adev: amdgpu_device pointer 4316 * 4317 * The list of all the hardware IPs that make up the asic is walked and the 4318 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset 4319 * handles any IP specific hardware or software state changes that are 4320 * necessary for a soft reset to succeed. 4321 * Returns 0 on success, negative error code on failure. 4322 */ 4323 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev) 4324 { 4325 int i, r = 0; 4326 4327 for (i = 0; i < adev->num_ip_blocks; i++) { 4328 if (!adev->ip_blocks[i].status.valid) 4329 continue; 4330 if (adev->ip_blocks[i].status.hang && 4331 adev->ip_blocks[i].version->funcs->pre_soft_reset) { 4332 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); 4333 if (r) 4334 return r; 4335 } 4336 } 4337 4338 return 0; 4339 } 4340 4341 /** 4342 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed 4343 * 4344 * @adev: amdgpu_device pointer 4345 * 4346 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu 4347 * reset is necessary to recover. 4348 * Returns true if a full asic reset is required, false if not. 4349 */ 4350 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev) 4351 { 4352 int i; 4353 4354 if (amdgpu_asic_need_full_reset(adev)) 4355 return true; 4356 4357 for (i = 0; i < adev->num_ip_blocks; i++) { 4358 if (!adev->ip_blocks[i].status.valid) 4359 continue; 4360 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || 4361 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || 4362 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || 4363 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || 4364 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 4365 if (adev->ip_blocks[i].status.hang) { 4366 dev_info(adev->dev, "Some block need full reset!\n"); 4367 return true; 4368 } 4369 } 4370 } 4371 return false; 4372 } 4373 4374 /** 4375 * amdgpu_device_ip_soft_reset - do a soft reset 4376 * 4377 * @adev: amdgpu_device pointer 4378 * 4379 * The list of all the hardware IPs that make up the asic is walked and the 4380 * soft_reset callbacks are run if the block is hung. soft_reset handles any 4381 * IP specific hardware or software state changes that are necessary to soft 4382 * reset the IP. 4383 * Returns 0 on success, negative error code on failure. 
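 *
 * Only blocks that were flagged as hung by check_soft_reset and that provide
 * a soft_reset callback are touched; all other blocks are skipped.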
4384 */ 4385 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev) 4386 { 4387 int i, r = 0; 4388 4389 for (i = 0; i < adev->num_ip_blocks; i++) { 4390 if (!adev->ip_blocks[i].status.valid) 4391 continue; 4392 if (adev->ip_blocks[i].status.hang && 4393 adev->ip_blocks[i].version->funcs->soft_reset) { 4394 r = adev->ip_blocks[i].version->funcs->soft_reset(adev); 4395 if (r) 4396 return r; 4397 } 4398 } 4399 4400 return 0; 4401 } 4402 4403 /** 4404 * amdgpu_device_ip_post_soft_reset - clean up from soft reset 4405 * 4406 * @adev: amdgpu_device pointer 4407 * 4408 * The list of all the hardware IPs that make up the asic is walked and the 4409 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset 4410 * handles any IP specific hardware or software state changes that are 4411 * necessary after the IP has been soft reset. 4412 * Returns 0 on success, negative error code on failure. 4413 */ 4414 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) 4415 { 4416 int i, r = 0; 4417 4418 for (i = 0; i < adev->num_ip_blocks; i++) { 4419 if (!adev->ip_blocks[i].status.valid) 4420 continue; 4421 if (adev->ip_blocks[i].status.hang && 4422 adev->ip_blocks[i].version->funcs->post_soft_reset) 4423 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); 4424 if (r) 4425 return r; 4426 } 4427 4428 return 0; 4429 } 4430 4431 /** 4432 * amdgpu_device_recover_vram - Recover some VRAM contents 4433 * 4434 * @adev: amdgpu_device pointer 4435 * 4436 * Restores the contents of VRAM buffers from the shadows in GTT. Used to 4437 * restore things like GPUVM page tables after a GPU reset where 4438 * the contents of VRAM might be lost. 4439 * 4440 * Returns: 4441 * 0 on success, negative error code on failure. 4442 */ 4443 static int amdgpu_device_recover_vram(struct amdgpu_device *adev) 4444 { 4445 struct dma_fence *fence = NULL, *next = NULL; 4446 struct amdgpu_bo *shadow; 4447 struct amdgpu_bo_vm *vmbo; 4448 long r = 1, tmo; 4449 4450 if (amdgpu_sriov_runtime(adev)) 4451 tmo = msecs_to_jiffies(8000); 4452 else 4453 tmo = msecs_to_jiffies(100); 4454 4455 dev_info(adev->dev, "recover vram bo from shadow start\n"); 4456 mutex_lock(&adev->shadow_list_lock); 4457 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) { 4458 shadow = &vmbo->bo; 4459 /* No need to recover an evicted BO */ 4460 if (shadow->tbo.resource->mem_type != TTM_PL_TT || 4461 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET || 4462 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM) 4463 continue; 4464 4465 r = amdgpu_bo_restore_shadow(shadow, &next); 4466 if (r) 4467 break; 4468 4469 if (fence) { 4470 tmo = dma_fence_wait_timeout(fence, false, tmo); 4471 dma_fence_put(fence); 4472 fence = next; 4473 if (tmo == 0) { 4474 r = -ETIMEDOUT; 4475 break; 4476 } else if (tmo < 0) { 4477 r = tmo; 4478 break; 4479 } 4480 } else { 4481 fence = next; 4482 } 4483 } 4484 mutex_unlock(&adev->shadow_list_lock); 4485 4486 if (fence) 4487 tmo = dma_fence_wait_timeout(fence, false, tmo); 4488 dma_fence_put(fence); 4489 4490 if (r < 0 || tmo <= 0) { 4491 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); 4492 return -EIO; 4493 } 4494 4495 dev_info(adev->dev, "recover vram bo from shadow done\n"); 4496 return 0; 4497 } 4498 4499 4500 /** 4501 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf 4502 * 4503 * @adev: amdgpu_device pointer 4504 * @from_hypervisor: request from hypervisor 4505 * 4506 * do VF FLR and reinitialize Asic 4507 * return 0 means succeeded 
otherwise failed 4508 */ 4509 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, 4510 bool from_hypervisor) 4511 { 4512 int r; 4513 struct amdgpu_hive_info *hive = NULL; 4514 int retry_limit = 0; 4515 4516 retry: 4517 amdgpu_amdkfd_pre_reset(adev); 4518 4519 if (from_hypervisor) 4520 r = amdgpu_virt_request_full_gpu(adev, true); 4521 else 4522 r = amdgpu_virt_reset_gpu(adev); 4523 if (r) 4524 return r; 4525 4526 /* Resume IP prior to SMC */ 4527 r = amdgpu_device_ip_reinit_early_sriov(adev); 4528 if (r) 4529 goto error; 4530 4531 amdgpu_virt_init_data_exchange(adev); 4532 4533 r = amdgpu_device_fw_loading(adev); 4534 if (r) 4535 return r; 4536 4537 /* now we are okay to resume SMC/CP/SDMA */ 4538 r = amdgpu_device_ip_reinit_late_sriov(adev); 4539 if (r) 4540 goto error; 4541 4542 hive = amdgpu_get_xgmi_hive(adev); 4543 /* Update PSP FW topology after reset */ 4544 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) 4545 r = amdgpu_xgmi_update_topology(hive, adev); 4546 4547 if (hive) 4548 amdgpu_put_xgmi_hive(hive); 4549 4550 if (!r) { 4551 amdgpu_irq_gpu_reset_resume_helper(adev); 4552 r = amdgpu_ib_ring_tests(adev); 4553 4554 amdgpu_amdkfd_post_reset(adev); 4555 } 4556 4557 error: 4558 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { 4559 amdgpu_inc_vram_lost(adev); 4560 r = amdgpu_device_recover_vram(adev); 4561 } 4562 amdgpu_virt_release_full_gpu(adev, true); 4563 4564 if (AMDGPU_RETRY_SRIOV_RESET(r)) { 4565 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) { 4566 retry_limit++; 4567 goto retry; 4568 } else 4569 DRM_ERROR("GPU reset retry is beyond the retry limit\n"); 4570 } 4571 4572 return r; 4573 } 4574 4575 /** 4576 * amdgpu_device_has_job_running - check if there is any job in mirror list 4577 * 4578 * @adev: amdgpu_device pointer 4579 * 4580 * check if there is any job in mirror list 4581 */ 4582 bool amdgpu_device_has_job_running(struct amdgpu_device *adev) 4583 { 4584 int i; 4585 struct drm_sched_job *job; 4586 4587 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 4588 struct amdgpu_ring *ring = adev->rings[i]; 4589 4590 if (!ring || !ring->sched.thread) 4591 continue; 4592 4593 spin_lock(&ring->sched.job_list_lock); 4594 job = list_first_entry_or_null(&ring->sched.pending_list, 4595 struct drm_sched_job, list); 4596 spin_unlock(&ring->sched.job_list_lock); 4597 if (job) 4598 return true; 4599 } 4600 return false; 4601 } 4602 4603 /** 4604 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery 4605 * 4606 * @adev: amdgpu_device pointer 4607 * 4608 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover 4609 * a hung GPU. 
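 *
 * The amdgpu_gpu_recovery module parameter drives the decision: 0 disables
 * recovery, -1 (auto) disables it only for the legacy ASICs listed below,
 * and any other value enables it. A timeout without a detected hardware
 * hang does not trigger a reset, unless RAS poison mode is unsupported, in
 * which case the soft-reset check is skipped entirely.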
4610 */ 4611 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) 4612 { 4613 4614 if (amdgpu_gpu_recovery == 0) 4615 goto disabled; 4616 4617 /* Skip soft reset check in fatal error mode */ 4618 if (!amdgpu_ras_is_poison_mode_supported(adev)) 4619 return true; 4620 4621 if (!amdgpu_device_ip_check_soft_reset(adev)) { 4622 dev_info(adev->dev,"Timeout, but no hardware hang detected.\n"); 4623 return false; 4624 } 4625 4626 if (amdgpu_sriov_vf(adev)) 4627 return true; 4628 4629 if (amdgpu_gpu_recovery == -1) { 4630 switch (adev->asic_type) { 4631 #ifdef CONFIG_DRM_AMDGPU_SI 4632 case CHIP_VERDE: 4633 case CHIP_TAHITI: 4634 case CHIP_PITCAIRN: 4635 case CHIP_OLAND: 4636 case CHIP_HAINAN: 4637 #endif 4638 #ifdef CONFIG_DRM_AMDGPU_CIK 4639 case CHIP_KAVERI: 4640 case CHIP_KABINI: 4641 case CHIP_MULLINS: 4642 #endif 4643 case CHIP_CARRIZO: 4644 case CHIP_STONEY: 4645 case CHIP_CYAN_SKILLFISH: 4646 goto disabled; 4647 default: 4648 break; 4649 } 4650 } 4651 4652 return true; 4653 4654 disabled: 4655 dev_info(adev->dev, "GPU recovery disabled.\n"); 4656 return false; 4657 } 4658 4659 int amdgpu_device_mode1_reset(struct amdgpu_device *adev) 4660 { 4661 u32 i; 4662 int ret = 0; 4663 4664 amdgpu_atombios_scratch_regs_engine_hung(adev, true); 4665 4666 dev_info(adev->dev, "GPU mode1 reset\n"); 4667 4668 /* disable BM */ 4669 pci_clear_master(adev->pdev); 4670 4671 amdgpu_device_cache_pci_state(adev->pdev); 4672 4673 if (amdgpu_dpm_is_mode1_reset_supported(adev)) { 4674 dev_info(adev->dev, "GPU smu mode1 reset\n"); 4675 ret = amdgpu_dpm_mode1_reset(adev); 4676 } else { 4677 dev_info(adev->dev, "GPU psp mode1 reset\n"); 4678 ret = psp_gpu_reset(adev); 4679 } 4680 4681 if (ret) 4682 dev_err(adev->dev, "GPU mode1 reset failed\n"); 4683 4684 amdgpu_device_load_pci_state(adev->pdev); 4685 4686 /* wait for asic to come out of reset */ 4687 for (i = 0; i < adev->usec_timeout; i++) { 4688 u32 memsize = adev->nbio.funcs->get_memsize(adev); 4689 4690 if (memsize != 0xffffffff) 4691 break; 4692 udelay(1); 4693 } 4694 4695 amdgpu_atombios_scratch_regs_engine_hung(adev, false); 4696 return ret; 4697 } 4698 4699 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, 4700 struct amdgpu_reset_context *reset_context) 4701 { 4702 int i, r = 0; 4703 struct amdgpu_job *job = NULL; 4704 bool need_full_reset = 4705 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 4706 4707 if (reset_context->reset_req_dev == adev) 4708 job = reset_context->job; 4709 4710 if (amdgpu_sriov_vf(adev)) { 4711 /* stop the data exchange thread */ 4712 amdgpu_virt_fini_data_exchange(adev); 4713 } 4714 4715 amdgpu_fence_driver_isr_toggle(adev, true); 4716 4717 /* block all schedulers and reset given job's ring */ 4718 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 4719 struct amdgpu_ring *ring = adev->rings[i]; 4720 4721 if (!ring || !ring->sched.thread) 4722 continue; 4723 4724 /*clear job fence from fence drv to avoid force_completion 4725 *leave NULL and vm flush fence in fence drv */ 4726 amdgpu_fence_driver_clear_job_fences(ring); 4727 4728 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ 4729 amdgpu_fence_driver_force_completion(ring); 4730 } 4731 4732 amdgpu_fence_driver_isr_toggle(adev, false); 4733 4734 if (job && job->vm) 4735 drm_sched_increase_karma(&job->base); 4736 4737 r = amdgpu_reset_prepare_hwcontext(adev, reset_context); 4738 /* If reset handler not implemented, continue; otherwise return */ 4739 if (r == -ENOSYS) 4740 r = 0; 4741 else 4742 return r; 4743 4744 /* Don't suspend 
on bare metal if we are not going to HW reset the ASIC */ 4745 if (!amdgpu_sriov_vf(adev)) { 4746 4747 if (!need_full_reset) 4748 need_full_reset = amdgpu_device_ip_need_full_reset(adev); 4749 4750 if (!need_full_reset && amdgpu_gpu_recovery) { 4751 amdgpu_device_ip_pre_soft_reset(adev); 4752 r = amdgpu_device_ip_soft_reset(adev); 4753 amdgpu_device_ip_post_soft_reset(adev); 4754 if (r || amdgpu_device_ip_check_soft_reset(adev)) { 4755 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n"); 4756 need_full_reset = true; 4757 } 4758 } 4759 4760 if (need_full_reset) 4761 r = amdgpu_device_ip_suspend(adev); 4762 if (need_full_reset) 4763 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 4764 else 4765 clear_bit(AMDGPU_NEED_FULL_RESET, 4766 &reset_context->flags); 4767 } 4768 4769 return r; 4770 } 4771 4772 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev) 4773 { 4774 int i; 4775 4776 lockdep_assert_held(&adev->reset_domain->sem); 4777 4778 for (i = 0; i < adev->num_regs; i++) { 4779 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]); 4780 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], 4781 adev->reset_dump_reg_value[i]); 4782 } 4783 4784 return 0; 4785 } 4786 4787 #ifdef CONFIG_DEV_COREDUMP 4788 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset, 4789 size_t count, void *data, size_t datalen) 4790 { 4791 struct drm_printer p; 4792 struct amdgpu_device *adev = data; 4793 struct drm_print_iterator iter; 4794 int i; 4795 4796 iter.data = buffer; 4797 iter.offset = 0; 4798 iter.start = offset; 4799 iter.remain = count; 4800 4801 p = drm_coredump_printer(&iter); 4802 4803 drm_printf(&p, "**** AMDGPU Device Coredump ****\n"); 4804 drm_printf(&p, "kernel: " UTS_RELEASE "\n"); 4805 drm_printf(&p, "module: " KBUILD_MODNAME "\n"); 4806 drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec); 4807 if (adev->reset_task_info.pid) 4808 drm_printf(&p, "process_name: %s PID: %d\n", 4809 adev->reset_task_info.process_name, 4810 adev->reset_task_info.pid); 4811 4812 if (adev->reset_vram_lost) 4813 drm_printf(&p, "VRAM is lost due to GPU reset!\n"); 4814 if (adev->num_regs) { 4815 drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n"); 4816 4817 for (i = 0; i < adev->num_regs; i++) 4818 drm_printf(&p, "0x%08x: 0x%08x\n", 4819 adev->reset_dump_reg_list[i], 4820 adev->reset_dump_reg_value[i]); 4821 } 4822 4823 return count - iter.remain; 4824 } 4825 4826 static void amdgpu_devcoredump_free(void *data) 4827 { 4828 } 4829 4830 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev) 4831 { 4832 struct drm_device *dev = adev_to_drm(adev); 4833 4834 ktime_get_ts64(&adev->reset_time); 4835 dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL, 4836 amdgpu_devcoredump_read, amdgpu_devcoredump_free); 4837 } 4838 #endif 4839 4840 int amdgpu_do_asic_reset(struct list_head *device_list_handle, 4841 struct amdgpu_reset_context *reset_context) 4842 { 4843 struct amdgpu_device *tmp_adev = NULL; 4844 bool need_full_reset, skip_hw_reset, vram_lost = false; 4845 int r = 0; 4846 bool gpu_reset_for_dev_remove = 0; 4847 4848 /* Try reset handler method first */ 4849 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, 4850 reset_list); 4851 amdgpu_reset_reg_dumps(tmp_adev); 4852 4853 reset_context->reset_device_list = device_list_handle; 4854 r = amdgpu_reset_perform_reset(tmp_adev, reset_context); 4855 /* If reset handler not implemented, continue; otherwise return */ 4856 if (r == 
-ENOSYS) 4857 r = 0; 4858 else 4859 return r; 4860 4861 /* Reset handler not implemented, use the default method */ 4862 need_full_reset = 4863 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 4864 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags); 4865 4866 gpu_reset_for_dev_remove = 4867 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) && 4868 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 4869 4870 /* 4871 * ASIC reset has to be done on all XGMI hive nodes ASAP 4872 * to allow proper links negotiation in FW (within 1 sec) 4873 */ 4874 if (!skip_hw_reset && need_full_reset) { 4875 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 4876 /* For XGMI run all resets in parallel to speed up the process */ 4877 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { 4878 tmp_adev->gmc.xgmi.pending_reset = false; 4879 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work)) 4880 r = -EALREADY; 4881 } else 4882 r = amdgpu_asic_reset(tmp_adev); 4883 4884 if (r) { 4885 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s", 4886 r, adev_to_drm(tmp_adev)->unique); 4887 break; 4888 } 4889 } 4890 4891 /* For XGMI wait for all resets to complete before proceed */ 4892 if (!r) { 4893 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 4894 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { 4895 flush_work(&tmp_adev->xgmi_reset_work); 4896 r = tmp_adev->asic_reset_res; 4897 if (r) 4898 break; 4899 } 4900 } 4901 } 4902 } 4903 4904 if (!r && amdgpu_ras_intr_triggered()) { 4905 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 4906 if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops && 4907 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count) 4908 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev); 4909 } 4910 4911 amdgpu_ras_intr_cleared(); 4912 } 4913 4914 /* Since the mode1 reset affects base ip blocks, the 4915 * phase1 ip blocks need to be resumed. Otherwise there 4916 * will be a BIOS signature error and the psp bootloader 4917 * can't load kdb on the next amdgpu install. 
4918 */ 4919 if (gpu_reset_for_dev_remove) { 4920 list_for_each_entry(tmp_adev, device_list_handle, reset_list) 4921 amdgpu_device_ip_resume_phase1(tmp_adev); 4922 4923 goto end; 4924 } 4925 4926 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 4927 if (need_full_reset) { 4928 /* post card */ 4929 r = amdgpu_device_asic_init(tmp_adev); 4930 if (r) { 4931 dev_warn(tmp_adev->dev, "asic atom init failed!"); 4932 } else { 4933 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); 4934 r = amdgpu_amdkfd_resume_iommu(tmp_adev); 4935 if (r) 4936 goto out; 4937 4938 r = amdgpu_device_ip_resume_phase1(tmp_adev); 4939 if (r) 4940 goto out; 4941 4942 vram_lost = amdgpu_device_check_vram_lost(tmp_adev); 4943 #ifdef CONFIG_DEV_COREDUMP 4944 tmp_adev->reset_vram_lost = vram_lost; 4945 memset(&tmp_adev->reset_task_info, 0, 4946 sizeof(tmp_adev->reset_task_info)); 4947 if (reset_context->job && reset_context->job->vm) 4948 tmp_adev->reset_task_info = 4949 reset_context->job->vm->task_info; 4950 amdgpu_reset_capture_coredumpm(tmp_adev); 4951 #endif 4952 if (vram_lost) { 4953 DRM_INFO("VRAM is lost due to GPU reset!\n"); 4954 amdgpu_inc_vram_lost(tmp_adev); 4955 } 4956 4957 r = amdgpu_device_fw_loading(tmp_adev); 4958 if (r) 4959 return r; 4960 4961 r = amdgpu_device_ip_resume_phase2(tmp_adev); 4962 if (r) 4963 goto out; 4964 4965 if (vram_lost) 4966 amdgpu_device_fill_reset_magic(tmp_adev); 4967 4968 /* 4969 * Add this ASIC as tracked as reset was already 4970 * complete successfully. 4971 */ 4972 amdgpu_register_gpu_instance(tmp_adev); 4973 4974 if (!reset_context->hive && 4975 tmp_adev->gmc.xgmi.num_physical_nodes > 1) 4976 amdgpu_xgmi_add_device(tmp_adev); 4977 4978 r = amdgpu_device_ip_late_init(tmp_adev); 4979 if (r) 4980 goto out; 4981 4982 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false); 4983 4984 /* 4985 * The GPU enters bad state once faulty pages 4986 * by ECC has reached the threshold, and ras 4987 * recovery is scheduled next. So add one check 4988 * here to break recovery if it indeed exceeds 4989 * bad page threshold, and remind user to 4990 * retire this GPU or setting one bigger 4991 * bad_page_threshold value to fix this once 4992 * probing driver again. 4993 */ 4994 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) { 4995 /* must succeed. 
*/ 4996 amdgpu_ras_resume(tmp_adev); 4997 } else { 4998 r = -EINVAL; 4999 goto out; 5000 } 5001 5002 /* Update PSP FW topology after reset */ 5003 if (reset_context->hive && 5004 tmp_adev->gmc.xgmi.num_physical_nodes > 1) 5005 r = amdgpu_xgmi_update_topology( 5006 reset_context->hive, tmp_adev); 5007 } 5008 } 5009 5010 out: 5011 if (!r) { 5012 amdgpu_irq_gpu_reset_resume_helper(tmp_adev); 5013 r = amdgpu_ib_ring_tests(tmp_adev); 5014 if (r) { 5015 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r); 5016 need_full_reset = true; 5017 r = -EAGAIN; 5018 goto end; 5019 } 5020 } 5021 5022 if (!r) 5023 r = amdgpu_device_recover_vram(tmp_adev); 5024 else 5025 tmp_adev->asic_reset_res = r; 5026 } 5027 5028 end: 5029 if (need_full_reset) 5030 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 5031 else 5032 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 5033 return r; 5034 } 5035 5036 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev) 5037 { 5038 5039 switch (amdgpu_asic_reset_method(adev)) { 5040 case AMD_RESET_METHOD_MODE1: 5041 adev->mp1_state = PP_MP1_STATE_SHUTDOWN; 5042 break; 5043 case AMD_RESET_METHOD_MODE2: 5044 adev->mp1_state = PP_MP1_STATE_RESET; 5045 break; 5046 default: 5047 adev->mp1_state = PP_MP1_STATE_NONE; 5048 break; 5049 } 5050 } 5051 5052 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev) 5053 { 5054 amdgpu_vf_error_trans_all(adev); 5055 adev->mp1_state = PP_MP1_STATE_NONE; 5056 } 5057 5058 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev) 5059 { 5060 struct pci_dev *p = NULL; 5061 5062 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), 5063 adev->pdev->bus->number, 1); 5064 if (p) { 5065 pm_runtime_enable(&(p->dev)); 5066 pm_runtime_resume(&(p->dev)); 5067 } 5068 5069 pci_dev_put(p); 5070 } 5071 5072 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) 5073 { 5074 enum amd_reset_method reset_method; 5075 struct pci_dev *p = NULL; 5076 u64 expires; 5077 5078 /* 5079 * For now, only BACO and mode1 reset are confirmed 5080 * to suffer the audio issue if the audio device is not properly suspended. 5081 */ 5082 reset_method = amdgpu_asic_reset_method(adev); 5083 if ((reset_method != AMD_RESET_METHOD_BACO) && 5084 (reset_method != AMD_RESET_METHOD_MODE1)) 5085 return -EINVAL; 5086 5087 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), 5088 adev->pdev->bus->number, 1); 5089 if (!p) 5090 return -ENODEV; 5091 5092 expires = pm_runtime_autosuspend_expiration(&(p->dev)); 5093 if (!expires) 5094 /* 5095 * If we cannot get the audio device autosuspend delay, 5096 * a fixed 4S interval will be used. Considering 3S is 5097 * the audio controller's default autosuspend delay setting, 5098 * the 4S used here is guaranteed to cover that. 5099 */ 5100 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL; 5101 5102 while (!pm_runtime_status_suspended(&(p->dev))) { 5103 if (!pm_runtime_suspend(&(p->dev))) 5104 break; 5105 5106 if (expires < ktime_get_mono_fast_ns()) { 5107 dev_warn(adev->dev, "failed to suspend display audio\n"); 5108 pci_dev_put(p); 5109 /* TODO: abort the succeeding gpu reset?
*/ 5110 return -ETIMEDOUT; 5111 } 5112 } 5113 5114 pm_runtime_disable(&(p->dev)); 5115 5116 pci_dev_put(p); 5117 return 0; 5118 } 5119 5120 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev) 5121 { 5122 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 5123 5124 #if defined(CONFIG_DEBUG_FS) 5125 if (!amdgpu_sriov_vf(adev)) 5126 cancel_work(&adev->reset_work); 5127 #endif 5128 5129 if (adev->kfd.dev) 5130 cancel_work(&adev->kfd.reset_work); 5131 5132 if (amdgpu_sriov_vf(adev)) 5133 cancel_work(&adev->virt.flr_work); 5134 5135 if (con && adev->ras_enabled) 5136 cancel_work(&con->recovery_work); 5137 5138 } 5139 5140 /** 5141 * amdgpu_device_gpu_recover - reset the asic and recover scheduler 5142 * 5143 * @adev: amdgpu_device pointer 5144 * @job: which job trigger hang 5145 * 5146 * Attempt to reset the GPU if it has hung (all asics). 5147 * Attempt to do soft-reset or full-reset and reinitialize Asic 5148 * Returns 0 for success or an error on failure. 5149 */ 5150 5151 int amdgpu_device_gpu_recover(struct amdgpu_device *adev, 5152 struct amdgpu_job *job, 5153 struct amdgpu_reset_context *reset_context) 5154 { 5155 struct list_head device_list, *device_list_handle = NULL; 5156 bool job_signaled = false; 5157 struct amdgpu_hive_info *hive = NULL; 5158 struct amdgpu_device *tmp_adev = NULL; 5159 int i, r = 0; 5160 bool need_emergency_restart = false; 5161 bool audio_suspended = false; 5162 bool gpu_reset_for_dev_remove = false; 5163 5164 gpu_reset_for_dev_remove = 5165 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) && 5166 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 5167 5168 /* 5169 * Special case: RAS triggered and full reset isn't supported 5170 */ 5171 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev); 5172 5173 /* 5174 * Flush RAM to disk so that after reboot 5175 * the user can read log and see why the system rebooted. 5176 */ 5177 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) { 5178 DRM_WARN("Emergency reboot."); 5179 5180 ksys_sync_helper(); 5181 emergency_restart(); 5182 } 5183 5184 dev_info(adev->dev, "GPU %s begin!\n", 5185 need_emergency_restart ? "jobs stop":"reset"); 5186 5187 if (!amdgpu_sriov_vf(adev)) 5188 hive = amdgpu_get_xgmi_hive(adev); 5189 if (hive) 5190 mutex_lock(&hive->hive_lock); 5191 5192 reset_context->job = job; 5193 reset_context->hive = hive; 5194 /* 5195 * Build list of devices to reset. 5196 * In case we are in XGMI hive mode, resort the device list 5197 * to put adev in the 1st position. 
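 *
 * The first entry of the list is also used to take the reset domain lock,
 * which an XGMI hive shares across all of its devices, so the whole hive is
 * serialized with a single lock/unlock pair.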
5198 */ 5199 INIT_LIST_HEAD(&device_list); 5200 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) { 5201 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { 5202 list_add_tail(&tmp_adev->reset_list, &device_list); 5203 if (gpu_reset_for_dev_remove && adev->shutdown) 5204 tmp_adev->shutdown = true; 5205 } 5206 if (!list_is_first(&adev->reset_list, &device_list)) 5207 list_rotate_to_front(&adev->reset_list, &device_list); 5208 device_list_handle = &device_list; 5209 } else { 5210 list_add_tail(&adev->reset_list, &device_list); 5211 device_list_handle = &device_list; 5212 } 5213 5214 /* We need to lock reset domain only once both for XGMI and single device */ 5215 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, 5216 reset_list); 5217 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain); 5218 5219 /* block all schedulers and reset given job's ring */ 5220 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5221 5222 amdgpu_device_set_mp1_state(tmp_adev); 5223 5224 /* 5225 * Try to put the audio codec into suspend state 5226 * before gpu reset started. 5227 * 5228 * Because the power domain of the graphics device 5229 * is shared with the AZ power domain, without this 5230 * we may change the audio hardware from behind 5231 * the audio driver's back. That will trigger 5232 * some audio codec errors. 5233 */ 5234 if (!amdgpu_device_suspend_display_audio(tmp_adev)) 5235 audio_suspended = true; 5236 5237 amdgpu_ras_set_error_query_ready(tmp_adev, false); 5238 5239 cancel_delayed_work_sync(&tmp_adev->delayed_init_work); 5240 5241 if (!amdgpu_sriov_vf(tmp_adev)) 5242 amdgpu_amdkfd_pre_reset(tmp_adev); 5243 5244 /* 5245 * Mark these ASICs to be reset as untracked first, 5246 * and add them back after reset completes 5247 */ 5248 amdgpu_unregister_gpu_instance(tmp_adev); 5249 5250 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true); 5251 5252 /* disable ras on ALL IPs */ 5253 if (!need_emergency_restart && 5254 amdgpu_device_ip_need_full_reset(tmp_adev)) 5255 amdgpu_ras_suspend(tmp_adev); 5256 5257 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 5258 struct amdgpu_ring *ring = tmp_adev->rings[i]; 5259 5260 if (!ring || !ring->sched.thread) 5261 continue; 5262 5263 drm_sched_stop(&ring->sched, job ? &job->base : NULL); 5264 5265 if (need_emergency_restart) 5266 amdgpu_job_stop_all_jobs_on_sched(&ring->sched); 5267 } 5268 atomic_inc(&tmp_adev->gpu_reset_counter); 5269 } 5270 5271 if (need_emergency_restart) 5272 goto skip_sched_resume; 5273 5274 /* 5275 * Must check guilty signal here since after this point all old 5276 * HW fences are force signaled. 5277 * 5278 * job->base holds a reference to parent fence 5279 */ 5280 if (job && dma_fence_is_signaled(&job->hw_fence)) { 5281 job_signaled = true; 5282 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); 5283 goto skip_hw_reset; 5284 } 5285 5286 retry: /* Rest of adevs pre asic reset from XGMI hive. */ 5287 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5288 if (gpu_reset_for_dev_remove) { 5289 /* Workaround for ASICs that need to disable SMC first */ 5290 amdgpu_device_smu_fini_early(tmp_adev); 5291 } 5292 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context); 5293 /* TODO: Should we stop? */ 5294 if (r) { 5295 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", 5296 r, adev_to_drm(tmp_adev)->unique); 5297 tmp_adev->asic_reset_res = r; 5298 } 5299 5300 /* 5301 * Drop all pending non-scheduler resets.
Scheduler resets 5302 * were already dropped during drm_sched_stop 5303 */ 5304 amdgpu_device_stop_pending_resets(tmp_adev); 5305 } 5306 5307 /* Actual ASIC resets if needed. */ 5308 /* Host driver will handle XGMI hive reset for SRIOV */ 5309 if (amdgpu_sriov_vf(adev)) { 5310 r = amdgpu_device_reset_sriov(adev, job ? false : true); 5311 if (r) 5312 adev->asic_reset_res = r; 5313 5314 /* Aldebaran supports ras in SRIOV, so need to resume ras during reset */ 5315 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) 5316 amdgpu_ras_resume(adev); 5317 } else { 5318 r = amdgpu_do_asic_reset(device_list_handle, reset_context); 5319 if (r && r == -EAGAIN) 5320 goto retry; 5321 5322 if (!r && gpu_reset_for_dev_remove) 5323 goto recover_end; 5324 } 5325 5326 skip_hw_reset: 5327 5328 /* Post ASIC reset for all devs. */ 5329 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5330 5331 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 5332 struct amdgpu_ring *ring = tmp_adev->rings[i]; 5333 5334 if (!ring || !ring->sched.thread) 5335 continue; 5336 5337 drm_sched_start(&ring->sched, true); 5338 } 5339 5340 if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)) 5341 amdgpu_mes_self_test(tmp_adev); 5342 5343 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) { 5344 drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); 5345 } 5346 5347 if (tmp_adev->asic_reset_res) 5348 r = tmp_adev->asic_reset_res; 5349 5350 tmp_adev->asic_reset_res = 0; 5351 5352 if (r) { 5353 /* bad news, how to tell it to userspace? */ 5354 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter)); 5355 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); 5356 } else { 5357 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); 5358 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0)) 5359 DRM_WARN("smart shift update failed\n"); 5360 } 5361 } 5362 5363 skip_sched_resume: 5364 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5365 /* unlock kfd: SRIOV would do it separately */ 5366 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev)) 5367 amdgpu_amdkfd_post_reset(tmp_adev); 5368 5369 /* kfd_post_reset will do nothing if kfd device is not initialized, 5370 * need to bring up kfd here if it was not initialized before 5371 */ 5372 if (!adev->kfd.init_complete) 5373 amdgpu_amdkfd_device_init(adev); 5374 5375 if (audio_suspended) 5376 amdgpu_device_resume_display_audio(tmp_adev); 5377 5378 amdgpu_device_unset_mp1_state(tmp_adev); 5379 5380 amdgpu_ras_set_error_query_ready(tmp_adev, true); 5381 } 5382 5383 recover_end: 5384 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, 5385 reset_list); 5386 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain); 5387 5388 if (hive) { 5389 mutex_unlock(&hive->hive_lock); 5390 amdgpu_put_xgmi_hive(hive); 5391 } 5392 5393 if (r) 5394 dev_info(adev->dev, "GPU reset end with ret = %d\n", r); 5395 5396 atomic_set(&adev->reset_domain->reset_res, r); 5397 return r; 5398 } 5399 5400 /** 5401 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot 5402 * 5403 * @adev: amdgpu_device pointer 5404 * 5405 * Fetches and stores in the driver the PCIE capabilities (gen speed 5406 * and lanes) of the slot the device is in. Handles APUs and 5407 * virtualized environments where PCIE config space may not be available.
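 *
 * The detected masks can be overridden with the amdgpu_pcie_gen_cap and
 * amdgpu_pcie_lane_cap module parameters, and devices sitting directly on a
 * root bus (e.g. APUs) fall back to AMDGPU_DEFAULT_PCIE_GEN_MASK and
 * AMDGPU_DEFAULT_PCIE_MLW_MASK.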
5408 */ 5409 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) 5410 { 5411 struct pci_dev *pdev; 5412 enum pci_bus_speed speed_cap, platform_speed_cap; 5413 enum pcie_link_width platform_link_width; 5414 5415 if (amdgpu_pcie_gen_cap) 5416 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap; 5417 5418 if (amdgpu_pcie_lane_cap) 5419 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap; 5420 5421 /* covers APUs as well */ 5422 if (pci_is_root_bus(adev->pdev->bus)) { 5423 if (adev->pm.pcie_gen_mask == 0) 5424 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; 5425 if (adev->pm.pcie_mlw_mask == 0) 5426 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; 5427 return; 5428 } 5429 5430 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask) 5431 return; 5432 5433 pcie_bandwidth_available(adev->pdev, NULL, 5434 &platform_speed_cap, &platform_link_width); 5435 5436 if (adev->pm.pcie_gen_mask == 0) { 5437 /* asic caps */ 5438 pdev = adev->pdev; 5439 speed_cap = pcie_get_speed_cap(pdev); 5440 if (speed_cap == PCI_SPEED_UNKNOWN) { 5441 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5442 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5443 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); 5444 } else { 5445 if (speed_cap == PCIE_SPEED_32_0GT) 5446 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5447 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5448 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 | 5449 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 | 5450 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5); 5451 else if (speed_cap == PCIE_SPEED_16_0GT) 5452 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5453 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5454 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 | 5455 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4); 5456 else if (speed_cap == PCIE_SPEED_8_0GT) 5457 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5458 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5459 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); 5460 else if (speed_cap == PCIE_SPEED_5_0GT) 5461 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5462 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2); 5463 else 5464 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1; 5465 } 5466 /* platform caps */ 5467 if (platform_speed_cap == PCI_SPEED_UNKNOWN) { 5468 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5469 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2); 5470 } else { 5471 if (platform_speed_cap == PCIE_SPEED_32_0GT) 5472 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5473 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5474 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 | 5475 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 | 5476 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5); 5477 else if (platform_speed_cap == PCIE_SPEED_16_0GT) 5478 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5479 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5480 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 | 5481 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4); 5482 else if (platform_speed_cap == PCIE_SPEED_8_0GT) 5483 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5484 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5485 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3); 5486 else if (platform_speed_cap == PCIE_SPEED_5_0GT) 5487 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5488 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2); 5489 else 5490 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; 5491 5492 } 5493 } 5494 if (adev->pm.pcie_mlw_mask == 0) { 5495 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) { 5496 adev->pm.pcie_mlw_mask 
|= AMDGPU_DEFAULT_PCIE_MLW_MASK; 5497 } else { 5498 switch (platform_link_width) { 5499 case PCIE_LNK_X32: 5500 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | 5501 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 5502 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 5503 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 5504 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 5505 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5506 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5507 break; 5508 case PCIE_LNK_X16: 5509 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 5510 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 5511 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 5512 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 5513 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5514 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5515 break; 5516 case PCIE_LNK_X12: 5517 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 5518 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 5519 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 5520 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5521 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5522 break; 5523 case PCIE_LNK_X8: 5524 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 5525 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 5526 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5527 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5528 break; 5529 case PCIE_LNK_X4: 5530 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 5531 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5532 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5533 break; 5534 case PCIE_LNK_X2: 5535 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5536 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5537 break; 5538 case PCIE_LNK_X1: 5539 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; 5540 break; 5541 default: 5542 break; 5543 } 5544 } 5545 } 5546 } 5547 5548 /** 5549 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR 5550 * 5551 * @adev: amdgpu_device pointer 5552 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev 5553 * 5554 * Return true if @peer_adev can access (DMA) @adev through the PCIe 5555 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of 5556 * @peer_adev. 5557 */ 5558 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, 5559 struct amdgpu_device *peer_adev) 5560 { 5561 #ifdef CONFIG_HSA_AMD_P2P 5562 uint64_t address_mask = peer_adev->dev->dma_mask ? 
5563 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1); 5564 resource_size_t aper_limit = 5565 adev->gmc.aper_base + adev->gmc.aper_size - 1; 5566 bool p2p_access = 5567 !adev->gmc.xgmi.connected_to_cpu && 5568 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0); 5569 5570 return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size && 5571 adev->gmc.real_vram_size == adev->gmc.visible_vram_size && 5572 !(adev->gmc.aper_base & address_mask || 5573 aper_limit & address_mask)); 5574 #else 5575 return false; 5576 #endif 5577 } 5578 5579 int amdgpu_device_baco_enter(struct drm_device *dev) 5580 { 5581 struct amdgpu_device *adev = drm_to_adev(dev); 5582 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 5583 5584 if (!amdgpu_device_supports_baco(adev_to_drm(adev))) 5585 return -ENOTSUPP; 5586 5587 if (ras && adev->ras_enabled && 5588 adev->nbio.funcs->enable_doorbell_interrupt) 5589 adev->nbio.funcs->enable_doorbell_interrupt(adev, false); 5590 5591 return amdgpu_dpm_baco_enter(adev); 5592 } 5593 5594 int amdgpu_device_baco_exit(struct drm_device *dev) 5595 { 5596 struct amdgpu_device *adev = drm_to_adev(dev); 5597 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 5598 int ret = 0; 5599 5600 if (!amdgpu_device_supports_baco(adev_to_drm(adev))) 5601 return -ENOTSUPP; 5602 5603 ret = amdgpu_dpm_baco_exit(adev); 5604 if (ret) 5605 return ret; 5606 5607 if (ras && adev->ras_enabled && 5608 adev->nbio.funcs->enable_doorbell_interrupt) 5609 adev->nbio.funcs->enable_doorbell_interrupt(adev, true); 5610 5611 if (amdgpu_passthrough(adev) && 5612 adev->nbio.funcs->clear_doorbell_interrupt) 5613 adev->nbio.funcs->clear_doorbell_interrupt(adev); 5614 5615 return 0; 5616 } 5617 5618 /** 5619 * amdgpu_pci_error_detected - Called when a PCI error is detected. 5620 * @pdev: PCI device struct 5621 * @state: PCI channel state 5622 * 5623 * Description: Called when a PCI error is detected. 5624 * 5625 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT. 
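 *
 * A pci_channel_io_normal error returns PCI_ERS_RESULT_CAN_RECOVER instead;
 * for a frozen channel the reset domain is locked and all schedulers are
 * stopped before a slot reset is requested.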
5626 */ 5627 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 5628 { 5629 struct drm_device *dev = pci_get_drvdata(pdev); 5630 struct amdgpu_device *adev = drm_to_adev(dev); 5631 int i; 5632 5633 DRM_INFO("PCI error: detected callback, state(%d)!!\n", state); 5634 5635 if (adev->gmc.xgmi.num_physical_nodes > 1) { 5636 DRM_WARN("No support for XGMI hive yet..."); 5637 return PCI_ERS_RESULT_DISCONNECT; 5638 } 5639 5640 adev->pci_channel_state = state; 5641 5642 switch (state) { 5643 case pci_channel_io_normal: 5644 return PCI_ERS_RESULT_CAN_RECOVER; 5645 /* Fatal error, prepare for slot reset */ 5646 case pci_channel_io_frozen: 5647 /* 5648 * Locking adev->reset_domain->sem will prevent any external access 5649 * to GPU during PCI error recovery 5650 */ 5651 amdgpu_device_lock_reset_domain(adev->reset_domain); 5652 amdgpu_device_set_mp1_state(adev); 5653 5654 /* 5655 * Block any work scheduling as we do for regular GPU reset 5656 * for the duration of the recovery 5657 */ 5658 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 5659 struct amdgpu_ring *ring = adev->rings[i]; 5660 5661 if (!ring || !ring->sched.thread) 5662 continue; 5663 5664 drm_sched_stop(&ring->sched, NULL); 5665 } 5666 atomic_inc(&adev->gpu_reset_counter); 5667 return PCI_ERS_RESULT_NEED_RESET; 5668 case pci_channel_io_perm_failure: 5669 /* Permanent error, prepare for device removal */ 5670 return PCI_ERS_RESULT_DISCONNECT; 5671 } 5672 5673 return PCI_ERS_RESULT_NEED_RESET; 5674 } 5675 5676 /** 5677 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers 5678 * @pdev: pointer to PCI device 5679 */ 5680 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev) 5681 { 5682 5683 DRM_INFO("PCI error: mmio enabled callback!!\n"); 5684 5685 /* TODO - dump whatever for debugging purposes */ 5686 5687 /* This called only if amdgpu_pci_error_detected returns 5688 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still 5689 * works, no need to reset slot. 5690 */ 5691 5692 return PCI_ERS_RESULT_RECOVERED; 5693 } 5694 5695 /** 5696 * amdgpu_pci_slot_reset - Called when PCI slot has been reset. 5697 * @pdev: PCI device struct 5698 * 5699 * Description: This routine is called by the pci error recovery 5700 * code after the PCI slot has been reset, just before we 5701 * should resume normal operations. 
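 *
 * The handler restores the cached PCI config space, waits for the ASIC to
 * report a valid memory size, and then reuses the normal pre-reset/reset
 * path with AMDGPU_SKIP_HW_RESET set, since the slot reset already reset
 * the hardware.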
5702 */ 5703 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev) 5704 { 5705 struct drm_device *dev = pci_get_drvdata(pdev); 5706 struct amdgpu_device *adev = drm_to_adev(dev); 5707 int r, i; 5708 struct amdgpu_reset_context reset_context; 5709 u32 memsize; 5710 struct list_head device_list; 5711 5712 DRM_INFO("PCI error: slot reset callback!!\n"); 5713 5714 memset(&reset_context, 0, sizeof(reset_context)); 5715 5716 INIT_LIST_HEAD(&device_list); 5717 list_add_tail(&adev->reset_list, &device_list); 5718 5719 /* wait for asic to come out of reset */ 5720 msleep(500); 5721 5722 /* Restore PCI confspace */ 5723 amdgpu_device_load_pci_state(pdev); 5724 5725 /* confirm ASIC came out of reset */ 5726 for (i = 0; i < adev->usec_timeout; i++) { 5727 memsize = amdgpu_asic_get_config_memsize(adev); 5728 5729 if (memsize != 0xffffffff) 5730 break; 5731 udelay(1); 5732 } 5733 if (memsize == 0xffffffff) { 5734 r = -ETIME; 5735 goto out; 5736 } 5737 5738 reset_context.method = AMD_RESET_METHOD_NONE; 5739 reset_context.reset_req_dev = adev; 5740 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); 5741 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); 5742 5743 adev->no_hw_access = true; 5744 r = amdgpu_device_pre_asic_reset(adev, &reset_context); 5745 adev->no_hw_access = false; 5746 if (r) 5747 goto out; 5748 5749 r = amdgpu_do_asic_reset(&device_list, &reset_context); 5750 5751 out: 5752 if (!r) { 5753 if (amdgpu_device_cache_pci_state(adev->pdev)) 5754 pci_restore_state(adev->pdev); 5755 5756 DRM_INFO("PCIe error recovery succeeded\n"); 5757 } else { 5758 DRM_ERROR("PCIe error recovery failed, err:%d\n", r); 5759 amdgpu_device_unset_mp1_state(adev); 5760 amdgpu_device_unlock_reset_domain(adev->reset_domain); 5761 } 5762 5763 return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 5764 } 5765 5766 /** 5767 * amdgpu_pci_resume() - resume normal ops after PCI reset 5768 * @pdev: pointer to PCI device 5769 * 5770 * Called when the error recovery driver tells us that it's 5771 * OK to resume normal operation.
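 *
 * Schedulers are restarted only when the channel had been frozen; for a
 * pci_channel_io_normal error nothing was stopped, so there is nothing to
 * resume.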

/**
 * amdgpu_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that it's
 * OK to resume normal operation.
 */
void amdgpu_pci_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: resume callback!!\n");

	/* Only continue execution for the case of pci_channel_io_frozen */
	if (adev->pci_channel_state != pci_channel_io_frozen)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		drm_sched_start(&ring->sched, true);
	}

	amdgpu_device_unset_mp1_state(adev);
	amdgpu_device_unlock_reset_domain(adev->reset_domain);
}

bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pci_save_state(pdev);
	if (!r) {
		kfree(adev->pci_state);

		adev->pci_state = pci_store_saved_state(pdev);

		if (!adev->pci_state) {
			DRM_ERROR("Failed to store PCI saved state\n");
			return false;
		}
	} else {
		DRM_WARN("Failed to save PCI state, err:%d\n", r);
		return false;
	}

	return true;
}

bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (!adev->pci_state)
		return false;

	r = pci_load_saved_state(pdev, adev->pci_state);

	if (!r) {
		pci_restore_state(pdev);
	} else {
		DRM_WARN("Failed to load PCI state, err:%d\n", r);
		return false;
	}

	return true;
}

void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
		struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	if (ring && ring->funcs->emit_hdp_flush)
		amdgpu_ring_emit_hdp_flush(ring);
	else
		amdgpu_asic_flush_hdp(adev, ring);
}

void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
		struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	amdgpu_asic_invalidate_hdp(adev, ring);
}

int amdgpu_in_reset(struct amdgpu_device *adev)
{
	return atomic_read(&adev->reset_domain->in_gpu_reset);
}
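
/*
 * Usage sketch (illustrative, not an actual call site): amdgpu_in_reset()
 * lets code that touches the hardware outside of the reset path bail out
 * while a reset is in flight, e.g.:
 *
 *	if (amdgpu_in_reset(adev))
 *		return -EBUSY;
 *
 * The real callers are spread across the IP blocks, and the error code
 * returned depends on the context.
 */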

/**
 * amdgpu_device_halt() - bring hardware to some kind of halt state
 *
 * @adev: amdgpu_device pointer
 *
 * Bring the hardware to some kind of halt state so that no one can touch it
 * any more. This helps preserve the error context when an error has occurred.
 * Compared to a simple hang, the system stays stable, at least for SSH
 * access. It should then be trivial to inspect the hardware state and
 * see what's going on. Implemented as follows:
 *
 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc.),
 *    clears all CPU mappings to the device, disallows remappings through page faults
 * 2. amdgpu_irq_disable_all() disables all interrupts
 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
 * 4. set adev->no_hw_access to avoid potential crashes after step 5
 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
 *    flush any in-flight DMA operations
 */
void amdgpu_device_halt(struct amdgpu_device *adev)
{
	struct pci_dev *pdev = adev->pdev;
	struct drm_device *ddev = adev_to_drm(adev);

	drm_dev_unplug(ddev);

	amdgpu_irq_disable_all(adev);

	amdgpu_fence_driver_hw_fini(adev);

	adev->no_hw_access = true;

	amdgpu_device_unmap_mmio(adev);

	pci_disable_device(pdev);
	pci_wait_for_pending_transaction(pdev);
}

u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
				u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
				u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_switch_gang - switch to a new gang
 * @adev: amdgpu_device pointer
 * @gang: the gang to switch to
 *
 * Try to switch to a new gang.
 * Returns: NULL if we switched to the new gang or a reference to the current
 * gang leader.
 */
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
					    struct dma_fence *gang)
{
	struct dma_fence *old = NULL;

	do {
		dma_fence_put(old);
		rcu_read_lock();
		old = dma_fence_get_rcu_safe(&adev->gang_submit);
		rcu_read_unlock();

		if (old == gang)
			break;

		if (!dma_fence_is_signaled(old))
			return old;

	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
			 old, gang) != old);

	dma_fence_put(old);
	return NULL;
}

bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
	case CHIP_TOPAZ:
		/* chips with no display hardware */
		return false;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* chips with display hardware */
		return true;
	default:
		/* IP discovery */
		if (!adev->ip_versions[DCE_HWIP][0] ||
		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
			return false;
		return true;
	}
}
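
/*
 * Usage sketch (hypothetical, for illustration only): a caller would
 * typically use amdgpu_device_has_display_hardware() to skip display
 * bring-up entirely on headless or display-harvested parts, e.g.:
 *
 *	if (!amdgpu_device_has_display_hardware(adev))
 *		return 0;	// nothing display-related to initialize
 */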