1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 */ 28 #include <linux/power_supply.h> 29 #include <linux/kthread.h> 30 #include <linux/module.h> 31 #include <linux/console.h> 32 #include <linux/slab.h> 33 34 #include <drm/drm_atomic_helper.h> 35 #include <drm/drm_probe_helper.h> 36 #include <drm/amdgpu_drm.h> 37 #include <linux/vgaarb.h> 38 #include <linux/vga_switcheroo.h> 39 #include <linux/efi.h> 40 #include "amdgpu.h" 41 #include "amdgpu_trace.h" 42 #include "amdgpu_i2c.h" 43 #include "atom.h" 44 #include "amdgpu_atombios.h" 45 #include "amdgpu_atomfirmware.h" 46 #include "amd_pcie.h" 47 #ifdef CONFIG_DRM_AMDGPU_SI 48 #include "si.h" 49 #endif 50 #ifdef CONFIG_DRM_AMDGPU_CIK 51 #include "cik.h" 52 #endif 53 #include "vi.h" 54 #include "soc15.h" 55 #include "nv.h" 56 #include "bif/bif_4_1_d.h" 57 #include <linux/pci.h> 58 #include <linux/firmware.h> 59 #include "amdgpu_vf_error.h" 60 61 #include "amdgpu_amdkfd.h" 62 #include "amdgpu_pm.h" 63 64 #include "amdgpu_xgmi.h" 65 #include "amdgpu_ras.h" 66 #include "amdgpu_pmu.h" 67 68 #include <linux/suspend.h> 69 70 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); 71 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); 72 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); 73 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin"); 74 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin"); 75 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin"); 76 MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin"); 77 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin"); 78 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin"); 79 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin"); 80 81 #define AMDGPU_RESUME_MS 2000 82 83 const char *amdgpu_asic_name[] = { 84 "TAHITI", 85 "PITCAIRN", 86 "VERDE", 87 "OLAND", 88 "HAINAN", 89 "BONAIRE", 90 "KAVERI", 91 "KABINI", 92 "HAWAII", 93 "MULLINS", 94 "TOPAZ", 95 "TONGA", 96 "FIJI", 97 "CARRIZO", 98 "STONEY", 99 "POLARIS10", 100 "POLARIS11", 101 "POLARIS12", 102 "VEGAM", 103 "VEGA10", 104 "VEGA12", 105 "VEGA20", 106 "RAVEN", 107 "ARCTURUS", 108 "RENOIR", 109 "NAVI10", 110 "NAVI14", 111 "NAVI12", 112 "LAST", 113 }; 114 115 /** 116 * DOC: pcie_replay_count 117 * 118 * The amdgpu driver provides a sysfs API for reporting the total number 119 * of PCIe replays (NAKs) 120 * The file pcie_replay_count is used for this and returns the total 121 * number of replays as a sum of the NAKs generated and NAKs received 122 */ 123 124 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev, 125 struct device_attribute *attr, char *buf) 126 { 127 struct drm_device *ddev = dev_get_drvdata(dev); 128 struct amdgpu_device *adev = ddev->dev_private; 129 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev); 130 131 return snprintf(buf, PAGE_SIZE, "%llu\n", cnt); 132 } 133 134 static DEVICE_ATTR(pcie_replay_count, S_IRUGO, 135 amdgpu_device_get_pcie_replay_count, NULL); 136 137 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev); 138 139 /** 140 * amdgpu_device_is_px - Is the device is a dGPU with HG/PX power control 141 * 142 * @dev: drm_device pointer 143 * 144 * Returns true if the device is a dGPU with HG/PX power control, 145 * otherwise return false. 146 */ 147 bool amdgpu_device_is_px(struct drm_device *dev) 148 { 149 struct amdgpu_device *adev = dev->dev_private; 150 151 if (adev->flags & AMD_IS_PX) 152 return true; 153 return false; 154 } 155 156 /** 157 * VRAM access helper functions. 158 * 159 * amdgpu_device_vram_access - read/write a buffer in vram 160 * 161 * @adev: amdgpu_device pointer 162 * @pos: offset of the buffer in vram 163 * @buf: virtual address of the buffer in system memory 164 * @size: read/write size, sizeof(@buf) must > @size 165 * @write: true - write to vram, otherwise - read from vram 166 */ 167 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, 168 uint32_t *buf, size_t size, bool write) 169 { 170 uint64_t last; 171 unsigned long flags; 172 173 last = size - 4; 174 for (last += pos; pos <= last; pos += 4) { 175 spin_lock_irqsave(&adev->mmio_idx_lock, flags); 176 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000); 177 WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31); 178 if (write) 179 WREG32_NO_KIQ(mmMM_DATA, *buf++); 180 else 181 *buf++ = RREG32_NO_KIQ(mmMM_DATA); 182 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); 183 } 184 } 185 186 /* 187 * MMIO register access helper functions. 188 */ 189 /** 190 * amdgpu_mm_rreg - read a memory mapped IO register 191 * 192 * @adev: amdgpu_device pointer 193 * @reg: dword aligned register offset 194 * @acc_flags: access flags which require special behavior 195 * 196 * Returns the 32 bit value from the offset specified. 197 */ 198 uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, 199 uint32_t acc_flags) 200 { 201 uint32_t ret; 202 203 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) 204 return amdgpu_virt_kiq_rreg(adev, reg); 205 206 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) 207 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); 208 else { 209 unsigned long flags; 210 211 spin_lock_irqsave(&adev->mmio_idx_lock, flags); 212 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); 213 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); 214 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); 215 } 216 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret); 217 return ret; 218 } 219 220 /* 221 * MMIO register read with bytes helper functions 222 * @offset:bytes offset from MMIO start 223 * 224 */ 225 226 /** 227 * amdgpu_mm_rreg8 - read a memory mapped IO register 228 * 229 * @adev: amdgpu_device pointer 230 * @offset: byte aligned register offset 231 * 232 * Returns the 8 bit value from the offset specified. 233 */ 234 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) { 235 if (offset < adev->rmmio_size) 236 return (readb(adev->rmmio + offset)); 237 BUG(); 238 } 239 240 /* 241 * MMIO register write with bytes helper functions 242 * @offset:bytes offset from MMIO start 243 * @value: the value want to be written to the register 244 * 245 */ 246 /** 247 * amdgpu_mm_wreg8 - read a memory mapped IO register 248 * 249 * @adev: amdgpu_device pointer 250 * @offset: byte aligned register offset 251 * @value: 8 bit value to write 252 * 253 * Writes the value specified to the offset specified. 254 */ 255 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) { 256 if (offset < adev->rmmio_size) 257 writeb(value, adev->rmmio + offset); 258 else 259 BUG(); 260 } 261 262 /** 263 * amdgpu_mm_wreg - write to a memory mapped IO register 264 * 265 * @adev: amdgpu_device pointer 266 * @reg: dword aligned register offset 267 * @v: 32 bit value to write to the register 268 * @acc_flags: access flags which require special behavior 269 * 270 * Writes the value specified to the offset specified. 271 */ 272 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, 273 uint32_t acc_flags) 274 { 275 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); 276 277 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { 278 adev->last_mm_index = v; 279 } 280 281 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) 282 return amdgpu_virt_kiq_wreg(adev, reg, v); 283 284 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) 285 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); 286 else { 287 unsigned long flags; 288 289 spin_lock_irqsave(&adev->mmio_idx_lock, flags); 290 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); 291 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); 292 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); 293 } 294 295 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { 296 udelay(500); 297 } 298 } 299 300 /** 301 * amdgpu_io_rreg - read an IO register 302 * 303 * @adev: amdgpu_device pointer 304 * @reg: dword aligned register offset 305 * 306 * Returns the 32 bit value from the offset specified. 307 */ 308 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) 309 { 310 if ((reg * 4) < adev->rio_mem_size) 311 return ioread32(adev->rio_mem + (reg * 4)); 312 else { 313 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); 314 return ioread32(adev->rio_mem + (mmMM_DATA * 4)); 315 } 316 } 317 318 /** 319 * amdgpu_io_wreg - write to an IO register 320 * 321 * @adev: amdgpu_device pointer 322 * @reg: dword aligned register offset 323 * @v: 32 bit value to write to the register 324 * 325 * Writes the value specified to the offset specified. 326 */ 327 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) 328 { 329 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { 330 adev->last_mm_index = v; 331 } 332 333 if ((reg * 4) < adev->rio_mem_size) 334 iowrite32(v, adev->rio_mem + (reg * 4)); 335 else { 336 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); 337 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4)); 338 } 339 340 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { 341 udelay(500); 342 } 343 } 344 345 /** 346 * amdgpu_mm_rdoorbell - read a doorbell dword 347 * 348 * @adev: amdgpu_device pointer 349 * @index: doorbell index 350 * 351 * Returns the value in the doorbell aperture at the 352 * requested doorbell index (CIK). 353 */ 354 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) 355 { 356 if (index < adev->doorbell.num_doorbells) { 357 return readl(adev->doorbell.ptr + index); 358 } else { 359 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); 360 return 0; 361 } 362 } 363 364 /** 365 * amdgpu_mm_wdoorbell - write a doorbell dword 366 * 367 * @adev: amdgpu_device pointer 368 * @index: doorbell index 369 * @v: value to write 370 * 371 * Writes @v to the doorbell aperture at the 372 * requested doorbell index (CIK). 373 */ 374 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v) 375 { 376 if (index < adev->doorbell.num_doorbells) { 377 writel(v, adev->doorbell.ptr + index); 378 } else { 379 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); 380 } 381 } 382 383 /** 384 * amdgpu_mm_rdoorbell64 - read a doorbell Qword 385 * 386 * @adev: amdgpu_device pointer 387 * @index: doorbell index 388 * 389 * Returns the value in the doorbell aperture at the 390 * requested doorbell index (VEGA10+). 391 */ 392 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index) 393 { 394 if (index < adev->doorbell.num_doorbells) { 395 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index)); 396 } else { 397 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); 398 return 0; 399 } 400 } 401 402 /** 403 * amdgpu_mm_wdoorbell64 - write a doorbell Qword 404 * 405 * @adev: amdgpu_device pointer 406 * @index: doorbell index 407 * @v: value to write 408 * 409 * Writes @v to the doorbell aperture at the 410 * requested doorbell index (VEGA10+). 411 */ 412 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v) 413 { 414 if (index < adev->doorbell.num_doorbells) { 415 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v); 416 } else { 417 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); 418 } 419 } 420 421 /** 422 * amdgpu_invalid_rreg - dummy reg read function 423 * 424 * @adev: amdgpu device pointer 425 * @reg: offset of register 426 * 427 * Dummy register read function. Used for register blocks 428 * that certain asics don't have (all asics). 429 * Returns the value in the register. 430 */ 431 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg) 432 { 433 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg); 434 BUG(); 435 return 0; 436 } 437 438 /** 439 * amdgpu_invalid_wreg - dummy reg write function 440 * 441 * @adev: amdgpu device pointer 442 * @reg: offset of register 443 * @v: value to write to the register 444 * 445 * Dummy register read function. Used for register blocks 446 * that certain asics don't have (all asics). 447 */ 448 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) 449 { 450 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n", 451 reg, v); 452 BUG(); 453 } 454 455 /** 456 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function 457 * 458 * @adev: amdgpu device pointer 459 * @reg: offset of register 460 * 461 * Dummy register read function. Used for register blocks 462 * that certain asics don't have (all asics). 463 * Returns the value in the register. 464 */ 465 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg) 466 { 467 DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg); 468 BUG(); 469 return 0; 470 } 471 472 /** 473 * amdgpu_invalid_wreg64 - dummy reg write function 474 * 475 * @adev: amdgpu device pointer 476 * @reg: offset of register 477 * @v: value to write to the register 478 * 479 * Dummy register read function. Used for register blocks 480 * that certain asics don't have (all asics). 481 */ 482 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v) 483 { 484 DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n", 485 reg, v); 486 BUG(); 487 } 488 489 /** 490 * amdgpu_block_invalid_rreg - dummy reg read function 491 * 492 * @adev: amdgpu device pointer 493 * @block: offset of instance 494 * @reg: offset of register 495 * 496 * Dummy register read function. Used for register blocks 497 * that certain asics don't have (all asics). 498 * Returns the value in the register. 499 */ 500 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev, 501 uint32_t block, uint32_t reg) 502 { 503 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n", 504 reg, block); 505 BUG(); 506 return 0; 507 } 508 509 /** 510 * amdgpu_block_invalid_wreg - dummy reg write function 511 * 512 * @adev: amdgpu device pointer 513 * @block: offset of instance 514 * @reg: offset of register 515 * @v: value to write to the register 516 * 517 * Dummy register read function. Used for register blocks 518 * that certain asics don't have (all asics). 519 */ 520 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, 521 uint32_t block, 522 uint32_t reg, uint32_t v) 523 { 524 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n", 525 reg, block, v); 526 BUG(); 527 } 528 529 /** 530 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page 531 * 532 * @adev: amdgpu device pointer 533 * 534 * Allocates a scratch page of VRAM for use by various things in the 535 * driver. 536 */ 537 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev) 538 { 539 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, 540 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 541 &adev->vram_scratch.robj, 542 &adev->vram_scratch.gpu_addr, 543 (void **)&adev->vram_scratch.ptr); 544 } 545 546 /** 547 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page 548 * 549 * @adev: amdgpu device pointer 550 * 551 * Frees the VRAM scratch page. 552 */ 553 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev) 554 { 555 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL); 556 } 557 558 /** 559 * amdgpu_device_program_register_sequence - program an array of registers. 560 * 561 * @adev: amdgpu_device pointer 562 * @registers: pointer to the register array 563 * @array_size: size of the register array 564 * 565 * Programs an array or registers with and and or masks. 566 * This is a helper for setting golden registers. 567 */ 568 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, 569 const u32 *registers, 570 const u32 array_size) 571 { 572 u32 tmp, reg, and_mask, or_mask; 573 int i; 574 575 if (array_size % 3) 576 return; 577 578 for (i = 0; i < array_size; i +=3) { 579 reg = registers[i + 0]; 580 and_mask = registers[i + 1]; 581 or_mask = registers[i + 2]; 582 583 if (and_mask == 0xffffffff) { 584 tmp = or_mask; 585 } else { 586 tmp = RREG32(reg); 587 tmp &= ~and_mask; 588 if (adev->family >= AMDGPU_FAMILY_AI) 589 tmp |= (or_mask & and_mask); 590 else 591 tmp |= or_mask; 592 } 593 WREG32(reg, tmp); 594 } 595 } 596 597 /** 598 * amdgpu_device_pci_config_reset - reset the GPU 599 * 600 * @adev: amdgpu_device pointer 601 * 602 * Resets the GPU using the pci config reset sequence. 603 * Only applicable to asics prior to vega10. 604 */ 605 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev) 606 { 607 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA); 608 } 609 610 /* 611 * GPU doorbell aperture helpers function. 612 */ 613 /** 614 * amdgpu_device_doorbell_init - Init doorbell driver information. 615 * 616 * @adev: amdgpu_device pointer 617 * 618 * Init doorbell driver information (CIK) 619 * Returns 0 on success, error on failure. 620 */ 621 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev) 622 { 623 624 /* No doorbell on SI hardware generation */ 625 if (adev->asic_type < CHIP_BONAIRE) { 626 adev->doorbell.base = 0; 627 adev->doorbell.size = 0; 628 adev->doorbell.num_doorbells = 0; 629 adev->doorbell.ptr = NULL; 630 return 0; 631 } 632 633 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET) 634 return -EINVAL; 635 636 amdgpu_asic_init_doorbell_index(adev); 637 638 /* doorbell bar mapping */ 639 adev->doorbell.base = pci_resource_start(adev->pdev, 2); 640 adev->doorbell.size = pci_resource_len(adev->pdev, 2); 641 642 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32), 643 adev->doorbell_index.max_assignment+1); 644 if (adev->doorbell.num_doorbells == 0) 645 return -EINVAL; 646 647 /* For Vega, reserve and map two pages on doorbell BAR since SDMA 648 * paging queue doorbell use the second page. The 649 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the 650 * doorbells are in the first page. So with paging queue enabled, 651 * the max num_doorbells should + 1 page (0x400 in dword) 652 */ 653 if (adev->asic_type >= CHIP_VEGA10) 654 adev->doorbell.num_doorbells += 0x400; 655 656 adev->doorbell.ptr = ioremap(adev->doorbell.base, 657 adev->doorbell.num_doorbells * 658 sizeof(u32)); 659 if (adev->doorbell.ptr == NULL) 660 return -ENOMEM; 661 662 return 0; 663 } 664 665 /** 666 * amdgpu_device_doorbell_fini - Tear down doorbell driver information. 667 * 668 * @adev: amdgpu_device pointer 669 * 670 * Tear down doorbell driver information (CIK) 671 */ 672 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev) 673 { 674 iounmap(adev->doorbell.ptr); 675 adev->doorbell.ptr = NULL; 676 } 677 678 679 680 /* 681 * amdgpu_device_wb_*() 682 * Writeback is the method by which the GPU updates special pages in memory 683 * with the status of certain GPU events (fences, ring pointers,etc.). 684 */ 685 686 /** 687 * amdgpu_device_wb_fini - Disable Writeback and free memory 688 * 689 * @adev: amdgpu_device pointer 690 * 691 * Disables Writeback and frees the Writeback memory (all asics). 692 * Used at driver shutdown. 693 */ 694 static void amdgpu_device_wb_fini(struct amdgpu_device *adev) 695 { 696 if (adev->wb.wb_obj) { 697 amdgpu_bo_free_kernel(&adev->wb.wb_obj, 698 &adev->wb.gpu_addr, 699 (void **)&adev->wb.wb); 700 adev->wb.wb_obj = NULL; 701 } 702 } 703 704 /** 705 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory 706 * 707 * @adev: amdgpu_device pointer 708 * 709 * Initializes writeback and allocates writeback memory (all asics). 710 * Used at driver startup. 711 * Returns 0 on success or an -error on failure. 712 */ 713 static int amdgpu_device_wb_init(struct amdgpu_device *adev) 714 { 715 int r; 716 717 if (adev->wb.wb_obj == NULL) { 718 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */ 719 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8, 720 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 721 &adev->wb.wb_obj, &adev->wb.gpu_addr, 722 (void **)&adev->wb.wb); 723 if (r) { 724 dev_warn(adev->dev, "(%d) create WB bo failed\n", r); 725 return r; 726 } 727 728 adev->wb.num_wb = AMDGPU_MAX_WB; 729 memset(&adev->wb.used, 0, sizeof(adev->wb.used)); 730 731 /* clear wb memory */ 732 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8); 733 } 734 735 return 0; 736 } 737 738 /** 739 * amdgpu_device_wb_get - Allocate a wb entry 740 * 741 * @adev: amdgpu_device pointer 742 * @wb: wb index 743 * 744 * Allocate a wb slot for use by the driver (all asics). 745 * Returns 0 on success or -EINVAL on failure. 746 */ 747 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb) 748 { 749 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); 750 751 if (offset < adev->wb.num_wb) { 752 __set_bit(offset, adev->wb.used); 753 *wb = offset << 3; /* convert to dw offset */ 754 return 0; 755 } else { 756 return -EINVAL; 757 } 758 } 759 760 /** 761 * amdgpu_device_wb_free - Free a wb entry 762 * 763 * @adev: amdgpu_device pointer 764 * @wb: wb index 765 * 766 * Free a wb slot allocated for use by the driver (all asics) 767 */ 768 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb) 769 { 770 wb >>= 3; 771 if (wb < adev->wb.num_wb) 772 __clear_bit(wb, adev->wb.used); 773 } 774 775 /** 776 * amdgpu_device_resize_fb_bar - try to resize FB BAR 777 * 778 * @adev: amdgpu_device pointer 779 * 780 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not 781 * to fail, but if any of the BARs is not accessible after the size we abort 782 * driver loading by returning -ENODEV. 783 */ 784 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) 785 { 786 u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size); 787 u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1; 788 struct pci_bus *root; 789 struct resource *res; 790 unsigned i; 791 u16 cmd; 792 int r; 793 794 /* Bypass for VF */ 795 if (amdgpu_sriov_vf(adev)) 796 return 0; 797 798 /* Check if the root BUS has 64bit memory resources */ 799 root = adev->pdev->bus; 800 while (root->parent) 801 root = root->parent; 802 803 pci_bus_for_each_resource(root, res, i) { 804 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) && 805 res->start > 0x100000000ull) 806 break; 807 } 808 809 /* Trying to resize is pointless without a root hub window above 4GB */ 810 if (!res) 811 return 0; 812 813 /* Disable memory decoding while we change the BAR addresses and size */ 814 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd); 815 pci_write_config_word(adev->pdev, PCI_COMMAND, 816 cmd & ~PCI_COMMAND_MEMORY); 817 818 /* Free the VRAM and doorbell BAR, we most likely need to move both. */ 819 amdgpu_device_doorbell_fini(adev); 820 if (adev->asic_type >= CHIP_BONAIRE) 821 pci_release_resource(adev->pdev, 2); 822 823 pci_release_resource(adev->pdev, 0); 824 825 r = pci_resize_resource(adev->pdev, 0, rbar_size); 826 if (r == -ENOSPC) 827 DRM_INFO("Not enough PCI address space for a large BAR."); 828 else if (r && r != -ENOTSUPP) 829 DRM_ERROR("Problem resizing BAR0 (%d).", r); 830 831 pci_assign_unassigned_bus_resources(adev->pdev->bus); 832 833 /* When the doorbell or fb BAR isn't available we have no chance of 834 * using the device. 835 */ 836 r = amdgpu_device_doorbell_init(adev); 837 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET)) 838 return -ENODEV; 839 840 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd); 841 842 return 0; 843 } 844 845 /* 846 * GPU helpers function. 847 */ 848 /** 849 * amdgpu_device_need_post - check if the hw need post or not 850 * 851 * @adev: amdgpu_device pointer 852 * 853 * Check if the asic has been initialized (all asics) at driver startup 854 * or post is needed if hw reset is performed. 855 * Returns true if need or false if not. 856 */ 857 bool amdgpu_device_need_post(struct amdgpu_device *adev) 858 { 859 uint32_t reg; 860 861 if (amdgpu_sriov_vf(adev)) 862 return false; 863 864 if (amdgpu_passthrough(adev)) { 865 /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot 866 * some old smc fw still need driver do vPost otherwise gpu hang, while 867 * those smc fw version above 22.15 doesn't have this flaw, so we force 868 * vpost executed for smc version below 22.15 869 */ 870 if (adev->asic_type == CHIP_FIJI) { 871 int err; 872 uint32_t fw_ver; 873 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev); 874 /* force vPost if error occured */ 875 if (err) 876 return true; 877 878 fw_ver = *((uint32_t *)adev->pm.fw->data + 69); 879 if (fw_ver < 0x00160e00) 880 return true; 881 } 882 } 883 884 if (adev->has_hw_reset) { 885 adev->has_hw_reset = false; 886 return true; 887 } 888 889 /* bios scratch used on CIK+ */ 890 if (adev->asic_type >= CHIP_BONAIRE) 891 return amdgpu_atombios_scratch_need_asic_init(adev); 892 893 /* check MEM_SIZE for older asics */ 894 reg = amdgpu_asic_get_config_memsize(adev); 895 896 if ((reg != 0) && (reg != 0xffffffff)) 897 return false; 898 899 return true; 900 } 901 902 /* if we get transitioned to only one device, take VGA back */ 903 /** 904 * amdgpu_device_vga_set_decode - enable/disable vga decode 905 * 906 * @cookie: amdgpu_device pointer 907 * @state: enable/disable vga decode 908 * 909 * Enable/disable vga decode (all asics). 910 * Returns VGA resource flags. 911 */ 912 static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state) 913 { 914 struct amdgpu_device *adev = cookie; 915 amdgpu_asic_set_vga_state(adev, state); 916 if (state) 917 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 918 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 919 else 920 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 921 } 922 923 /** 924 * amdgpu_device_check_block_size - validate the vm block size 925 * 926 * @adev: amdgpu_device pointer 927 * 928 * Validates the vm block size specified via module parameter. 929 * The vm block size defines number of bits in page table versus page directory, 930 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the 931 * page table and the remaining bits are in the page directory. 932 */ 933 static void amdgpu_device_check_block_size(struct amdgpu_device *adev) 934 { 935 /* defines number of bits in page table versus page directory, 936 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the 937 * page table and the remaining bits are in the page directory */ 938 if (amdgpu_vm_block_size == -1) 939 return; 940 941 if (amdgpu_vm_block_size < 9) { 942 dev_warn(adev->dev, "VM page table size (%d) too small\n", 943 amdgpu_vm_block_size); 944 amdgpu_vm_block_size = -1; 945 } 946 } 947 948 /** 949 * amdgpu_device_check_vm_size - validate the vm size 950 * 951 * @adev: amdgpu_device pointer 952 * 953 * Validates the vm size in GB specified via module parameter. 954 * The VM size is the size of the GPU virtual memory space in GB. 955 */ 956 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev) 957 { 958 /* no need to check the default value */ 959 if (amdgpu_vm_size == -1) 960 return; 961 962 if (amdgpu_vm_size < 1) { 963 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n", 964 amdgpu_vm_size); 965 amdgpu_vm_size = -1; 966 } 967 } 968 969 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev) 970 { 971 struct sysinfo si; 972 bool is_os_64 = (sizeof(void *) == 8) ? true : false; 973 uint64_t total_memory; 974 uint64_t dram_size_seven_GB = 0x1B8000000; 975 uint64_t dram_size_three_GB = 0xB8000000; 976 977 if (amdgpu_smu_memory_pool_size == 0) 978 return; 979 980 if (!is_os_64) { 981 DRM_WARN("Not 64-bit OS, feature not supported\n"); 982 goto def_value; 983 } 984 si_meminfo(&si); 985 total_memory = (uint64_t)si.totalram * si.mem_unit; 986 987 if ((amdgpu_smu_memory_pool_size == 1) || 988 (amdgpu_smu_memory_pool_size == 2)) { 989 if (total_memory < dram_size_three_GB) 990 goto def_value1; 991 } else if ((amdgpu_smu_memory_pool_size == 4) || 992 (amdgpu_smu_memory_pool_size == 8)) { 993 if (total_memory < dram_size_seven_GB) 994 goto def_value1; 995 } else { 996 DRM_WARN("Smu memory pool size not supported\n"); 997 goto def_value; 998 } 999 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28; 1000 1001 return; 1002 1003 def_value1: 1004 DRM_WARN("No enough system memory\n"); 1005 def_value: 1006 adev->pm.smu_prv_buffer_size = 0; 1007 } 1008 1009 /** 1010 * amdgpu_device_check_arguments - validate module params 1011 * 1012 * @adev: amdgpu_device pointer 1013 * 1014 * Validates certain module parameters and updates 1015 * the associated values used by the driver (all asics). 1016 */ 1017 static int amdgpu_device_check_arguments(struct amdgpu_device *adev) 1018 { 1019 int ret = 0; 1020 1021 if (amdgpu_sched_jobs < 4) { 1022 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", 1023 amdgpu_sched_jobs); 1024 amdgpu_sched_jobs = 4; 1025 } else if (!is_power_of_2(amdgpu_sched_jobs)){ 1026 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n", 1027 amdgpu_sched_jobs); 1028 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); 1029 } 1030 1031 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) { 1032 /* gart size must be greater or equal to 32M */ 1033 dev_warn(adev->dev, "gart size (%d) too small\n", 1034 amdgpu_gart_size); 1035 amdgpu_gart_size = -1; 1036 } 1037 1038 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { 1039 /* gtt size must be greater or equal to 32M */ 1040 dev_warn(adev->dev, "gtt size (%d) too small\n", 1041 amdgpu_gtt_size); 1042 amdgpu_gtt_size = -1; 1043 } 1044 1045 /* valid range is between 4 and 9 inclusive */ 1046 if (amdgpu_vm_fragment_size != -1 && 1047 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) { 1048 dev_warn(adev->dev, "valid range is between 4 and 9\n"); 1049 amdgpu_vm_fragment_size = -1; 1050 } 1051 1052 amdgpu_device_check_smu_prv_buffer_size(adev); 1053 1054 amdgpu_device_check_vm_size(adev); 1055 1056 amdgpu_device_check_block_size(adev); 1057 1058 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type); 1059 1060 return ret; 1061 } 1062 1063 /** 1064 * amdgpu_switcheroo_set_state - set switcheroo state 1065 * 1066 * @pdev: pci dev pointer 1067 * @state: vga_switcheroo state 1068 * 1069 * Callback for the switcheroo driver. Suspends or resumes the 1070 * the asics before or after it is powered up using ACPI methods. 1071 */ 1072 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) 1073 { 1074 struct drm_device *dev = pci_get_drvdata(pdev); 1075 1076 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF) 1077 return; 1078 1079 if (state == VGA_SWITCHEROO_ON) { 1080 pr_info("amdgpu: switched on\n"); 1081 /* don't suspend or resume card normally */ 1082 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1083 1084 amdgpu_device_resume(dev, true, true); 1085 1086 dev->switch_power_state = DRM_SWITCH_POWER_ON; 1087 drm_kms_helper_poll_enable(dev); 1088 } else { 1089 pr_info("amdgpu: switched off\n"); 1090 drm_kms_helper_poll_disable(dev); 1091 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1092 amdgpu_device_suspend(dev, true, true); 1093 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 1094 } 1095 } 1096 1097 /** 1098 * amdgpu_switcheroo_can_switch - see if switcheroo state can change 1099 * 1100 * @pdev: pci dev pointer 1101 * 1102 * Callback for the switcheroo driver. Check of the switcheroo 1103 * state can be changed. 1104 * Returns true if the state can be changed, false if not. 1105 */ 1106 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev) 1107 { 1108 struct drm_device *dev = pci_get_drvdata(pdev); 1109 1110 /* 1111 * FIXME: open_count is protected by drm_global_mutex but that would lead to 1112 * locking inversion with the driver load path. And the access here is 1113 * completely racy anyway. So don't bother with locking for now. 1114 */ 1115 return dev->open_count == 0; 1116 } 1117 1118 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = { 1119 .set_gpu_state = amdgpu_switcheroo_set_state, 1120 .reprobe = NULL, 1121 .can_switch = amdgpu_switcheroo_can_switch, 1122 }; 1123 1124 /** 1125 * amdgpu_device_ip_set_clockgating_state - set the CG state 1126 * 1127 * @dev: amdgpu_device pointer 1128 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) 1129 * @state: clockgating state (gate or ungate) 1130 * 1131 * Sets the requested clockgating state for all instances of 1132 * the hardware IP specified. 1133 * Returns the error code from the last instance. 1134 */ 1135 int amdgpu_device_ip_set_clockgating_state(void *dev, 1136 enum amd_ip_block_type block_type, 1137 enum amd_clockgating_state state) 1138 { 1139 struct amdgpu_device *adev = dev; 1140 int i, r = 0; 1141 1142 for (i = 0; i < adev->num_ip_blocks; i++) { 1143 if (!adev->ip_blocks[i].status.valid) 1144 continue; 1145 if (adev->ip_blocks[i].version->type != block_type) 1146 continue; 1147 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state) 1148 continue; 1149 r = adev->ip_blocks[i].version->funcs->set_clockgating_state( 1150 (void *)adev, state); 1151 if (r) 1152 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n", 1153 adev->ip_blocks[i].version->funcs->name, r); 1154 } 1155 return r; 1156 } 1157 1158 /** 1159 * amdgpu_device_ip_set_powergating_state - set the PG state 1160 * 1161 * @dev: amdgpu_device pointer 1162 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) 1163 * @state: powergating state (gate or ungate) 1164 * 1165 * Sets the requested powergating state for all instances of 1166 * the hardware IP specified. 1167 * Returns the error code from the last instance. 1168 */ 1169 int amdgpu_device_ip_set_powergating_state(void *dev, 1170 enum amd_ip_block_type block_type, 1171 enum amd_powergating_state state) 1172 { 1173 struct amdgpu_device *adev = dev; 1174 int i, r = 0; 1175 1176 for (i = 0; i < adev->num_ip_blocks; i++) { 1177 if (!adev->ip_blocks[i].status.valid) 1178 continue; 1179 if (adev->ip_blocks[i].version->type != block_type) 1180 continue; 1181 if (!adev->ip_blocks[i].version->funcs->set_powergating_state) 1182 continue; 1183 r = adev->ip_blocks[i].version->funcs->set_powergating_state( 1184 (void *)adev, state); 1185 if (r) 1186 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n", 1187 adev->ip_blocks[i].version->funcs->name, r); 1188 } 1189 return r; 1190 } 1191 1192 /** 1193 * amdgpu_device_ip_get_clockgating_state - get the CG state 1194 * 1195 * @adev: amdgpu_device pointer 1196 * @flags: clockgating feature flags 1197 * 1198 * Walks the list of IPs on the device and updates the clockgating 1199 * flags for each IP. 1200 * Updates @flags with the feature flags for each hardware IP where 1201 * clockgating is enabled. 1202 */ 1203 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev, 1204 u32 *flags) 1205 { 1206 int i; 1207 1208 for (i = 0; i < adev->num_ip_blocks; i++) { 1209 if (!adev->ip_blocks[i].status.valid) 1210 continue; 1211 if (adev->ip_blocks[i].version->funcs->get_clockgating_state) 1212 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags); 1213 } 1214 } 1215 1216 /** 1217 * amdgpu_device_ip_wait_for_idle - wait for idle 1218 * 1219 * @adev: amdgpu_device pointer 1220 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) 1221 * 1222 * Waits for the request hardware IP to be idle. 1223 * Returns 0 for success or a negative error code on failure. 1224 */ 1225 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, 1226 enum amd_ip_block_type block_type) 1227 { 1228 int i, r; 1229 1230 for (i = 0; i < adev->num_ip_blocks; i++) { 1231 if (!adev->ip_blocks[i].status.valid) 1232 continue; 1233 if (adev->ip_blocks[i].version->type == block_type) { 1234 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev); 1235 if (r) 1236 return r; 1237 break; 1238 } 1239 } 1240 return 0; 1241 1242 } 1243 1244 /** 1245 * amdgpu_device_ip_is_idle - is the hardware IP idle 1246 * 1247 * @adev: amdgpu_device pointer 1248 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) 1249 * 1250 * Check if the hardware IP is idle or not. 1251 * Returns true if it the IP is idle, false if not. 1252 */ 1253 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev, 1254 enum amd_ip_block_type block_type) 1255 { 1256 int i; 1257 1258 for (i = 0; i < adev->num_ip_blocks; i++) { 1259 if (!adev->ip_blocks[i].status.valid) 1260 continue; 1261 if (adev->ip_blocks[i].version->type == block_type) 1262 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev); 1263 } 1264 return true; 1265 1266 } 1267 1268 /** 1269 * amdgpu_device_ip_get_ip_block - get a hw IP pointer 1270 * 1271 * @adev: amdgpu_device pointer 1272 * @type: Type of hardware IP (SMU, GFX, UVD, etc.) 1273 * 1274 * Returns a pointer to the hardware IP block structure 1275 * if it exists for the asic, otherwise NULL. 1276 */ 1277 struct amdgpu_ip_block * 1278 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev, 1279 enum amd_ip_block_type type) 1280 { 1281 int i; 1282 1283 for (i = 0; i < adev->num_ip_blocks; i++) 1284 if (adev->ip_blocks[i].version->type == type) 1285 return &adev->ip_blocks[i]; 1286 1287 return NULL; 1288 } 1289 1290 /** 1291 * amdgpu_device_ip_block_version_cmp 1292 * 1293 * @adev: amdgpu_device pointer 1294 * @type: enum amd_ip_block_type 1295 * @major: major version 1296 * @minor: minor version 1297 * 1298 * return 0 if equal or greater 1299 * return 1 if smaller or the ip_block doesn't exist 1300 */ 1301 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev, 1302 enum amd_ip_block_type type, 1303 u32 major, u32 minor) 1304 { 1305 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type); 1306 1307 if (ip_block && ((ip_block->version->major > major) || 1308 ((ip_block->version->major == major) && 1309 (ip_block->version->minor >= minor)))) 1310 return 0; 1311 1312 return 1; 1313 } 1314 1315 /** 1316 * amdgpu_device_ip_block_add 1317 * 1318 * @adev: amdgpu_device pointer 1319 * @ip_block_version: pointer to the IP to add 1320 * 1321 * Adds the IP block driver information to the collection of IPs 1322 * on the asic. 1323 */ 1324 int amdgpu_device_ip_block_add(struct amdgpu_device *adev, 1325 const struct amdgpu_ip_block_version *ip_block_version) 1326 { 1327 if (!ip_block_version) 1328 return -EINVAL; 1329 1330 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks, 1331 ip_block_version->funcs->name); 1332 1333 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version; 1334 1335 return 0; 1336 } 1337 1338 /** 1339 * amdgpu_device_enable_virtual_display - enable virtual display feature 1340 * 1341 * @adev: amdgpu_device pointer 1342 * 1343 * Enabled the virtual display feature if the user has enabled it via 1344 * the module parameter virtual_display. This feature provides a virtual 1345 * display hardware on headless boards or in virtualized environments. 1346 * This function parses and validates the configuration string specified by 1347 * the user and configues the virtual display configuration (number of 1348 * virtual connectors, crtcs, etc.) specified. 1349 */ 1350 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev) 1351 { 1352 adev->enable_virtual_display = false; 1353 1354 if (amdgpu_virtual_display) { 1355 struct drm_device *ddev = adev->ddev; 1356 const char *pci_address_name = pci_name(ddev->pdev); 1357 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname; 1358 1359 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL); 1360 pciaddstr_tmp = pciaddstr; 1361 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) { 1362 pciaddname = strsep(&pciaddname_tmp, ","); 1363 if (!strcmp("all", pciaddname) 1364 || !strcmp(pci_address_name, pciaddname)) { 1365 long num_crtc; 1366 int res = -1; 1367 1368 adev->enable_virtual_display = true; 1369 1370 if (pciaddname_tmp) 1371 res = kstrtol(pciaddname_tmp, 10, 1372 &num_crtc); 1373 1374 if (!res) { 1375 if (num_crtc < 1) 1376 num_crtc = 1; 1377 if (num_crtc > 6) 1378 num_crtc = 6; 1379 adev->mode_info.num_crtc = num_crtc; 1380 } else { 1381 adev->mode_info.num_crtc = 1; 1382 } 1383 break; 1384 } 1385 } 1386 1387 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n", 1388 amdgpu_virtual_display, pci_address_name, 1389 adev->enable_virtual_display, adev->mode_info.num_crtc); 1390 1391 kfree(pciaddstr); 1392 } 1393 } 1394 1395 /** 1396 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware 1397 * 1398 * @adev: amdgpu_device pointer 1399 * 1400 * Parses the asic configuration parameters specified in the gpu info 1401 * firmware and makes them availale to the driver for use in configuring 1402 * the asic. 1403 * Returns 0 on success, -EINVAL on failure. 1404 */ 1405 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) 1406 { 1407 const char *chip_name; 1408 char fw_name[30]; 1409 int err; 1410 const struct gpu_info_firmware_header_v1_0 *hdr; 1411 1412 adev->firmware.gpu_info_fw = NULL; 1413 1414 switch (adev->asic_type) { 1415 case CHIP_TOPAZ: 1416 case CHIP_TONGA: 1417 case CHIP_FIJI: 1418 case CHIP_POLARIS10: 1419 case CHIP_POLARIS11: 1420 case CHIP_POLARIS12: 1421 case CHIP_VEGAM: 1422 case CHIP_CARRIZO: 1423 case CHIP_STONEY: 1424 #ifdef CONFIG_DRM_AMDGPU_SI 1425 case CHIP_VERDE: 1426 case CHIP_TAHITI: 1427 case CHIP_PITCAIRN: 1428 case CHIP_OLAND: 1429 case CHIP_HAINAN: 1430 #endif 1431 #ifdef CONFIG_DRM_AMDGPU_CIK 1432 case CHIP_BONAIRE: 1433 case CHIP_HAWAII: 1434 case CHIP_KAVERI: 1435 case CHIP_KABINI: 1436 case CHIP_MULLINS: 1437 #endif 1438 case CHIP_VEGA20: 1439 default: 1440 return 0; 1441 case CHIP_VEGA10: 1442 chip_name = "vega10"; 1443 break; 1444 case CHIP_VEGA12: 1445 chip_name = "vega12"; 1446 break; 1447 case CHIP_RAVEN: 1448 if (adev->rev_id >= 8) 1449 chip_name = "raven2"; 1450 else if (adev->pdev->device == 0x15d8) 1451 chip_name = "picasso"; 1452 else 1453 chip_name = "raven"; 1454 break; 1455 case CHIP_ARCTURUS: 1456 chip_name = "arcturus"; 1457 break; 1458 case CHIP_RENOIR: 1459 chip_name = "renoir"; 1460 break; 1461 case CHIP_NAVI10: 1462 chip_name = "navi10"; 1463 break; 1464 case CHIP_NAVI14: 1465 chip_name = "navi14"; 1466 break; 1467 case CHIP_NAVI12: 1468 chip_name = "navi12"; 1469 break; 1470 } 1471 1472 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); 1473 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev); 1474 if (err) { 1475 dev_err(adev->dev, 1476 "Failed to load gpu_info firmware \"%s\"\n", 1477 fw_name); 1478 goto out; 1479 } 1480 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw); 1481 if (err) { 1482 dev_err(adev->dev, 1483 "Failed to validate gpu_info firmware \"%s\"\n", 1484 fw_name); 1485 goto out; 1486 } 1487 1488 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data; 1489 amdgpu_ucode_print_gpu_info_hdr(&hdr->header); 1490 1491 switch (hdr->version_major) { 1492 case 1: 1493 { 1494 const struct gpu_info_firmware_v1_0 *gpu_info_fw = 1495 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data + 1496 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 1497 1498 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) 1499 goto parse_soc_bounding_box; 1500 1501 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se); 1502 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh); 1503 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se); 1504 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se); 1505 adev->gfx.config.max_texture_channel_caches = 1506 le32_to_cpu(gpu_info_fw->gc_num_tccs); 1507 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs); 1508 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds); 1509 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth); 1510 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth); 1511 adev->gfx.config.double_offchip_lds_buf = 1512 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer); 1513 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size); 1514 adev->gfx.cu_info.max_waves_per_simd = 1515 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd); 1516 adev->gfx.cu_info.max_scratch_slots_per_cu = 1517 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu); 1518 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size); 1519 if (hdr->version_minor >= 1) { 1520 const struct gpu_info_firmware_v1_1 *gpu_info_fw = 1521 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data + 1522 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 1523 adev->gfx.config.num_sc_per_sh = 1524 le32_to_cpu(gpu_info_fw->num_sc_per_sh); 1525 adev->gfx.config.num_packer_per_sc = 1526 le32_to_cpu(gpu_info_fw->num_packer_per_sc); 1527 } 1528 1529 parse_soc_bounding_box: 1530 #ifdef CONFIG_DRM_AMD_DC_DCN2_0 1531 /* 1532 * soc bounding box info is not integrated in disocovery table, 1533 * we always need to parse it from gpu info firmware. 1534 */ 1535 if (hdr->version_minor == 2) { 1536 const struct gpu_info_firmware_v1_2 *gpu_info_fw = 1537 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data + 1538 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 1539 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box; 1540 } 1541 #endif 1542 break; 1543 } 1544 default: 1545 dev_err(adev->dev, 1546 "Unsupported gpu_info table %d\n", hdr->header.ucode_version); 1547 err = -EINVAL; 1548 goto out; 1549 } 1550 out: 1551 return err; 1552 } 1553 1554 /** 1555 * amdgpu_device_ip_early_init - run early init for hardware IPs 1556 * 1557 * @adev: amdgpu_device pointer 1558 * 1559 * Early initialization pass for hardware IPs. The hardware IPs that make 1560 * up each asic are discovered each IP's early_init callback is run. This 1561 * is the first stage in initializing the asic. 1562 * Returns 0 on success, negative error code on failure. 1563 */ 1564 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) 1565 { 1566 int i, r; 1567 1568 amdgpu_device_enable_virtual_display(adev); 1569 1570 switch (adev->asic_type) { 1571 case CHIP_TOPAZ: 1572 case CHIP_TONGA: 1573 case CHIP_FIJI: 1574 case CHIP_POLARIS10: 1575 case CHIP_POLARIS11: 1576 case CHIP_POLARIS12: 1577 case CHIP_VEGAM: 1578 case CHIP_CARRIZO: 1579 case CHIP_STONEY: 1580 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) 1581 adev->family = AMDGPU_FAMILY_CZ; 1582 else 1583 adev->family = AMDGPU_FAMILY_VI; 1584 1585 r = vi_set_ip_blocks(adev); 1586 if (r) 1587 return r; 1588 break; 1589 #ifdef CONFIG_DRM_AMDGPU_SI 1590 case CHIP_VERDE: 1591 case CHIP_TAHITI: 1592 case CHIP_PITCAIRN: 1593 case CHIP_OLAND: 1594 case CHIP_HAINAN: 1595 adev->family = AMDGPU_FAMILY_SI; 1596 r = si_set_ip_blocks(adev); 1597 if (r) 1598 return r; 1599 break; 1600 #endif 1601 #ifdef CONFIG_DRM_AMDGPU_CIK 1602 case CHIP_BONAIRE: 1603 case CHIP_HAWAII: 1604 case CHIP_KAVERI: 1605 case CHIP_KABINI: 1606 case CHIP_MULLINS: 1607 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII)) 1608 adev->family = AMDGPU_FAMILY_CI; 1609 else 1610 adev->family = AMDGPU_FAMILY_KV; 1611 1612 r = cik_set_ip_blocks(adev); 1613 if (r) 1614 return r; 1615 break; 1616 #endif 1617 case CHIP_VEGA10: 1618 case CHIP_VEGA12: 1619 case CHIP_VEGA20: 1620 case CHIP_RAVEN: 1621 case CHIP_ARCTURUS: 1622 case CHIP_RENOIR: 1623 if (adev->asic_type == CHIP_RAVEN || 1624 adev->asic_type == CHIP_RENOIR) 1625 adev->family = AMDGPU_FAMILY_RV; 1626 else 1627 adev->family = AMDGPU_FAMILY_AI; 1628 1629 r = soc15_set_ip_blocks(adev); 1630 if (r) 1631 return r; 1632 break; 1633 case CHIP_NAVI10: 1634 case CHIP_NAVI14: 1635 case CHIP_NAVI12: 1636 adev->family = AMDGPU_FAMILY_NV; 1637 1638 r = nv_set_ip_blocks(adev); 1639 if (r) 1640 return r; 1641 break; 1642 default: 1643 /* FIXME: not supported yet */ 1644 return -EINVAL; 1645 } 1646 1647 r = amdgpu_device_parse_gpu_info_fw(adev); 1648 if (r) 1649 return r; 1650 1651 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) 1652 amdgpu_discovery_get_gfx_info(adev); 1653 1654 amdgpu_amdkfd_device_probe(adev); 1655 1656 if (amdgpu_sriov_vf(adev)) { 1657 r = amdgpu_virt_request_full_gpu(adev, true); 1658 if (r) 1659 return -EAGAIN; 1660 } 1661 1662 adev->pm.pp_feature = amdgpu_pp_feature_mask; 1663 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS) 1664 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 1665 1666 for (i = 0; i < adev->num_ip_blocks; i++) { 1667 if ((amdgpu_ip_block_mask & (1 << i)) == 0) { 1668 DRM_ERROR("disabled ip block: %d <%s>\n", 1669 i, adev->ip_blocks[i].version->funcs->name); 1670 adev->ip_blocks[i].status.valid = false; 1671 } else { 1672 if (adev->ip_blocks[i].version->funcs->early_init) { 1673 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev); 1674 if (r == -ENOENT) { 1675 adev->ip_blocks[i].status.valid = false; 1676 } else if (r) { 1677 DRM_ERROR("early_init of IP block <%s> failed %d\n", 1678 adev->ip_blocks[i].version->funcs->name, r); 1679 return r; 1680 } else { 1681 adev->ip_blocks[i].status.valid = true; 1682 } 1683 } else { 1684 adev->ip_blocks[i].status.valid = true; 1685 } 1686 } 1687 /* get the vbios after the asic_funcs are set up */ 1688 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { 1689 /* Read BIOS */ 1690 if (!amdgpu_get_bios(adev)) 1691 return -EINVAL; 1692 1693 r = amdgpu_atombios_init(adev); 1694 if (r) { 1695 dev_err(adev->dev, "amdgpu_atombios_init failed\n"); 1696 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); 1697 return r; 1698 } 1699 } 1700 } 1701 1702 adev->cg_flags &= amdgpu_cg_mask; 1703 adev->pg_flags &= amdgpu_pg_mask; 1704 1705 return 0; 1706 } 1707 1708 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev) 1709 { 1710 int i, r; 1711 1712 for (i = 0; i < adev->num_ip_blocks; i++) { 1713 if (!adev->ip_blocks[i].status.sw) 1714 continue; 1715 if (adev->ip_blocks[i].status.hw) 1716 continue; 1717 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 1718 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) || 1719 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { 1720 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 1721 if (r) { 1722 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 1723 adev->ip_blocks[i].version->funcs->name, r); 1724 return r; 1725 } 1726 adev->ip_blocks[i].status.hw = true; 1727 } 1728 } 1729 1730 return 0; 1731 } 1732 1733 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev) 1734 { 1735 int i, r; 1736 1737 for (i = 0; i < adev->num_ip_blocks; i++) { 1738 if (!adev->ip_blocks[i].status.sw) 1739 continue; 1740 if (adev->ip_blocks[i].status.hw) 1741 continue; 1742 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 1743 if (r) { 1744 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 1745 adev->ip_blocks[i].version->funcs->name, r); 1746 return r; 1747 } 1748 adev->ip_blocks[i].status.hw = true; 1749 } 1750 1751 return 0; 1752 } 1753 1754 static int amdgpu_device_fw_loading(struct amdgpu_device *adev) 1755 { 1756 int r = 0; 1757 int i; 1758 uint32_t smu_version; 1759 1760 if (adev->asic_type >= CHIP_VEGA10) { 1761 for (i = 0; i < adev->num_ip_blocks; i++) { 1762 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP) 1763 continue; 1764 1765 /* no need to do the fw loading again if already done*/ 1766 if (adev->ip_blocks[i].status.hw == true) 1767 break; 1768 1769 if (adev->in_gpu_reset || adev->in_suspend) { 1770 r = adev->ip_blocks[i].version->funcs->resume(adev); 1771 if (r) { 1772 DRM_ERROR("resume of IP block <%s> failed %d\n", 1773 adev->ip_blocks[i].version->funcs->name, r); 1774 return r; 1775 } 1776 } else { 1777 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 1778 if (r) { 1779 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 1780 adev->ip_blocks[i].version->funcs->name, r); 1781 return r; 1782 } 1783 } 1784 1785 adev->ip_blocks[i].status.hw = true; 1786 break; 1787 } 1788 } 1789 1790 r = amdgpu_pm_load_smu_firmware(adev, &smu_version); 1791 1792 return r; 1793 } 1794 1795 /** 1796 * amdgpu_device_ip_init - run init for hardware IPs 1797 * 1798 * @adev: amdgpu_device pointer 1799 * 1800 * Main initialization pass for hardware IPs. The list of all the hardware 1801 * IPs that make up the asic is walked and the sw_init and hw_init callbacks 1802 * are run. sw_init initializes the software state associated with each IP 1803 * and hw_init initializes the hardware associated with each IP. 1804 * Returns 0 on success, negative error code on failure. 1805 */ 1806 static int amdgpu_device_ip_init(struct amdgpu_device *adev) 1807 { 1808 int i, r; 1809 1810 r = amdgpu_ras_init(adev); 1811 if (r) 1812 return r; 1813 1814 for (i = 0; i < adev->num_ip_blocks; i++) { 1815 if (!adev->ip_blocks[i].status.valid) 1816 continue; 1817 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev); 1818 if (r) { 1819 DRM_ERROR("sw_init of IP block <%s> failed %d\n", 1820 adev->ip_blocks[i].version->funcs->name, r); 1821 goto init_failed; 1822 } 1823 adev->ip_blocks[i].status.sw = true; 1824 1825 /* need to do gmc hw init early so we can allocate gpu mem */ 1826 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 1827 r = amdgpu_device_vram_scratch_init(adev); 1828 if (r) { 1829 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); 1830 goto init_failed; 1831 } 1832 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); 1833 if (r) { 1834 DRM_ERROR("hw_init %d failed %d\n", i, r); 1835 goto init_failed; 1836 } 1837 r = amdgpu_device_wb_init(adev); 1838 if (r) { 1839 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r); 1840 goto init_failed; 1841 } 1842 adev->ip_blocks[i].status.hw = true; 1843 1844 /* right after GMC hw init, we create CSA */ 1845 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) { 1846 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj, 1847 AMDGPU_GEM_DOMAIN_VRAM, 1848 AMDGPU_CSA_SIZE); 1849 if (r) { 1850 DRM_ERROR("allocate CSA failed %d\n", r); 1851 goto init_failed; 1852 } 1853 } 1854 } 1855 } 1856 1857 r = amdgpu_ib_pool_init(adev); 1858 if (r) { 1859 dev_err(adev->dev, "IB initialization failed (%d).\n", r); 1860 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r); 1861 goto init_failed; 1862 } 1863 1864 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/ 1865 if (r) 1866 goto init_failed; 1867 1868 r = amdgpu_device_ip_hw_init_phase1(adev); 1869 if (r) 1870 goto init_failed; 1871 1872 r = amdgpu_device_fw_loading(adev); 1873 if (r) 1874 goto init_failed; 1875 1876 r = amdgpu_device_ip_hw_init_phase2(adev); 1877 if (r) 1878 goto init_failed; 1879 1880 /* 1881 * retired pages will be loaded from eeprom and reserved here, 1882 * it should be called after amdgpu_device_ip_hw_init_phase2 since 1883 * for some ASICs the RAS EEPROM code relies on SMU fully functioning 1884 * for I2C communication which only true at this point. 1885 * recovery_init may fail, but it can free all resources allocated by 1886 * itself and its failure should not stop amdgpu init process. 1887 * 1888 * Note: theoretically, this should be called before all vram allocations 1889 * to protect retired page from abusing 1890 */ 1891 amdgpu_ras_recovery_init(adev); 1892 1893 if (adev->gmc.xgmi.num_physical_nodes > 1) 1894 amdgpu_xgmi_add_device(adev); 1895 amdgpu_amdkfd_device_init(adev); 1896 1897 init_failed: 1898 if (amdgpu_sriov_vf(adev)) { 1899 if (!r) 1900 amdgpu_virt_init_data_exchange(adev); 1901 amdgpu_virt_release_full_gpu(adev, true); 1902 } 1903 1904 return r; 1905 } 1906 1907 /** 1908 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer 1909 * 1910 * @adev: amdgpu_device pointer 1911 * 1912 * Writes a reset magic value to the gart pointer in VRAM. The driver calls 1913 * this function before a GPU reset. If the value is retained after a 1914 * GPU reset, VRAM has not been lost. Some GPU resets may destry VRAM contents. 1915 */ 1916 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev) 1917 { 1918 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); 1919 } 1920 1921 /** 1922 * amdgpu_device_check_vram_lost - check if vram is valid 1923 * 1924 * @adev: amdgpu_device pointer 1925 * 1926 * Checks the reset magic value written to the gart pointer in VRAM. 1927 * The driver calls this after a GPU reset to see if the contents of 1928 * VRAM is lost or now. 1929 * returns true if vram is lost, false if not. 1930 */ 1931 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) 1932 { 1933 return !!memcmp(adev->gart.ptr, adev->reset_magic, 1934 AMDGPU_RESET_MAGIC_NUM); 1935 } 1936 1937 /** 1938 * amdgpu_device_set_cg_state - set clockgating for amdgpu device 1939 * 1940 * @adev: amdgpu_device pointer 1941 * 1942 * The list of all the hardware IPs that make up the asic is walked and the 1943 * set_clockgating_state callbacks are run. 1944 * Late initialization pass enabling clockgating for hardware IPs. 1945 * Fini or suspend, pass disabling clockgating for hardware IPs. 1946 * Returns 0 on success, negative error code on failure. 1947 */ 1948 1949 static int amdgpu_device_set_cg_state(struct amdgpu_device *adev, 1950 enum amd_clockgating_state state) 1951 { 1952 int i, j, r; 1953 1954 if (amdgpu_emu_mode == 1) 1955 return 0; 1956 1957 for (j = 0; j < adev->num_ip_blocks; j++) { 1958 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 1959 if (!adev->ip_blocks[i].status.late_initialized) 1960 continue; 1961 /* skip CG for VCE/UVD, it's handled specially */ 1962 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 1963 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && 1964 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && 1965 adev->ip_blocks[i].version->funcs->set_clockgating_state) { 1966 /* enable clockgating to save power */ 1967 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 1968 state); 1969 if (r) { 1970 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", 1971 adev->ip_blocks[i].version->funcs->name, r); 1972 return r; 1973 } 1974 } 1975 } 1976 1977 return 0; 1978 } 1979 1980 static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state) 1981 { 1982 int i, j, r; 1983 1984 if (amdgpu_emu_mode == 1) 1985 return 0; 1986 1987 for (j = 0; j < adev->num_ip_blocks; j++) { 1988 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 1989 if (!adev->ip_blocks[i].status.late_initialized) 1990 continue; 1991 /* skip CG for VCE/UVD, it's handled specially */ 1992 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 1993 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && 1994 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && 1995 adev->ip_blocks[i].version->funcs->set_powergating_state) { 1996 /* enable powergating to save power */ 1997 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev, 1998 state); 1999 if (r) { 2000 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n", 2001 adev->ip_blocks[i].version->funcs->name, r); 2002 return r; 2003 } 2004 } 2005 } 2006 return 0; 2007 } 2008 2009 static int amdgpu_device_enable_mgpu_fan_boost(void) 2010 { 2011 struct amdgpu_gpu_instance *gpu_ins; 2012 struct amdgpu_device *adev; 2013 int i, ret = 0; 2014 2015 mutex_lock(&mgpu_info.mutex); 2016 2017 /* 2018 * MGPU fan boost feature should be enabled 2019 * only when there are two or more dGPUs in 2020 * the system 2021 */ 2022 if (mgpu_info.num_dgpu < 2) 2023 goto out; 2024 2025 for (i = 0; i < mgpu_info.num_dgpu; i++) { 2026 gpu_ins = &(mgpu_info.gpu_ins[i]); 2027 adev = gpu_ins->adev; 2028 if (!(adev->flags & AMD_IS_APU) && 2029 !gpu_ins->mgpu_fan_enabled && 2030 adev->powerplay.pp_funcs && 2031 adev->powerplay.pp_funcs->enable_mgpu_fan_boost) { 2032 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev); 2033 if (ret) 2034 break; 2035 2036 gpu_ins->mgpu_fan_enabled = 1; 2037 } 2038 } 2039 2040 out: 2041 mutex_unlock(&mgpu_info.mutex); 2042 2043 return ret; 2044 } 2045 2046 /** 2047 * amdgpu_device_ip_late_init - run late init for hardware IPs 2048 * 2049 * @adev: amdgpu_device pointer 2050 * 2051 * Late initialization pass for hardware IPs. The list of all the hardware 2052 * IPs that make up the asic is walked and the late_init callbacks are run. 2053 * late_init covers any special initialization that an IP requires 2054 * after all of the have been initialized or something that needs to happen 2055 * late in the init process. 2056 * Returns 0 on success, negative error code on failure. 2057 */ 2058 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) 2059 { 2060 struct amdgpu_gpu_instance *gpu_instance; 2061 int i = 0, r; 2062 2063 for (i = 0; i < adev->num_ip_blocks; i++) { 2064 if (!adev->ip_blocks[i].status.hw) 2065 continue; 2066 if (adev->ip_blocks[i].version->funcs->late_init) { 2067 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev); 2068 if (r) { 2069 DRM_ERROR("late_init of IP block <%s> failed %d\n", 2070 adev->ip_blocks[i].version->funcs->name, r); 2071 return r; 2072 } 2073 } 2074 adev->ip_blocks[i].status.late_initialized = true; 2075 } 2076 2077 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE); 2078 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE); 2079 2080 amdgpu_device_fill_reset_magic(adev); 2081 2082 r = amdgpu_device_enable_mgpu_fan_boost(); 2083 if (r) 2084 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r); 2085 2086 2087 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2088 mutex_lock(&mgpu_info.mutex); 2089 2090 /* 2091 * Reset device p-state to low as this was booted with high. 2092 * 2093 * This should be performed only after all devices from the same 2094 * hive get initialized. 2095 * 2096 * However, it's unknown how many device in the hive in advance. 2097 * As this is counted one by one during devices initializations. 2098 * 2099 * So, we wait for all XGMI interlinked devices initialized. 2100 * This may bring some delays as those devices may come from 2101 * different hives. But that should be OK. 2102 */ 2103 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) { 2104 for (i = 0; i < mgpu_info.num_gpu; i++) { 2105 gpu_instance = &(mgpu_info.gpu_ins[i]); 2106 if (gpu_instance->adev->flags & AMD_IS_APU) 2107 continue; 2108 2109 r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0); 2110 if (r) { 2111 DRM_ERROR("pstate setting failed (%d).\n", r); 2112 break; 2113 } 2114 } 2115 } 2116 2117 mutex_unlock(&mgpu_info.mutex); 2118 } 2119 2120 return 0; 2121 } 2122 2123 /** 2124 * amdgpu_device_ip_fini - run fini for hardware IPs 2125 * 2126 * @adev: amdgpu_device pointer 2127 * 2128 * Main teardown pass for hardware IPs. The list of all the hardware 2129 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks 2130 * are run. hw_fini tears down the hardware associated with each IP 2131 * and sw_fini tears down any software state associated with each IP. 2132 * Returns 0 on success, negative error code on failure. 2133 */ 2134 static int amdgpu_device_ip_fini(struct amdgpu_device *adev) 2135 { 2136 int i, r; 2137 2138 amdgpu_ras_pre_fini(adev); 2139 2140 if (adev->gmc.xgmi.num_physical_nodes > 1) 2141 amdgpu_xgmi_remove_device(adev); 2142 2143 amdgpu_amdkfd_device_fini(adev); 2144 2145 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 2146 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 2147 2148 /* need to disable SMC first */ 2149 for (i = 0; i < adev->num_ip_blocks; i++) { 2150 if (!adev->ip_blocks[i].status.hw) 2151 continue; 2152 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 2153 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); 2154 /* XXX handle errors */ 2155 if (r) { 2156 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", 2157 adev->ip_blocks[i].version->funcs->name, r); 2158 } 2159 adev->ip_blocks[i].status.hw = false; 2160 break; 2161 } 2162 } 2163 2164 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2165 if (!adev->ip_blocks[i].status.hw) 2166 continue; 2167 2168 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); 2169 /* XXX handle errors */ 2170 if (r) { 2171 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", 2172 adev->ip_blocks[i].version->funcs->name, r); 2173 } 2174 2175 adev->ip_blocks[i].status.hw = false; 2176 } 2177 2178 2179 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2180 if (!adev->ip_blocks[i].status.sw) 2181 continue; 2182 2183 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 2184 amdgpu_ucode_free_bo(adev); 2185 amdgpu_free_static_csa(&adev->virt.csa_obj); 2186 amdgpu_device_wb_fini(adev); 2187 amdgpu_device_vram_scratch_fini(adev); 2188 amdgpu_ib_pool_fini(adev); 2189 } 2190 2191 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); 2192 /* XXX handle errors */ 2193 if (r) { 2194 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", 2195 adev->ip_blocks[i].version->funcs->name, r); 2196 } 2197 adev->ip_blocks[i].status.sw = false; 2198 adev->ip_blocks[i].status.valid = false; 2199 } 2200 2201 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2202 if (!adev->ip_blocks[i].status.late_initialized) 2203 continue; 2204 if (adev->ip_blocks[i].version->funcs->late_fini) 2205 adev->ip_blocks[i].version->funcs->late_fini((void *)adev); 2206 adev->ip_blocks[i].status.late_initialized = false; 2207 } 2208 2209 amdgpu_ras_fini(adev); 2210 2211 if (amdgpu_sriov_vf(adev)) 2212 if (amdgpu_virt_release_full_gpu(adev, false)) 2213 DRM_ERROR("failed to release exclusive mode on fini\n"); 2214 2215 return 0; 2216 } 2217 2218 /** 2219 * amdgpu_device_delayed_init_work_handler - work handler for IB tests 2220 * 2221 * @work: work_struct. 2222 */ 2223 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work) 2224 { 2225 struct amdgpu_device *adev = 2226 container_of(work, struct amdgpu_device, delayed_init_work.work); 2227 int r; 2228 2229 r = amdgpu_ib_ring_tests(adev); 2230 if (r) 2231 DRM_ERROR("ib ring test failed (%d).\n", r); 2232 } 2233 2234 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) 2235 { 2236 struct amdgpu_device *adev = 2237 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work); 2238 2239 mutex_lock(&adev->gfx.gfx_off_mutex); 2240 if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) { 2241 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true)) 2242 adev->gfx.gfx_off_state = true; 2243 } 2244 mutex_unlock(&adev->gfx.gfx_off_mutex); 2245 } 2246 2247 /** 2248 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1) 2249 * 2250 * @adev: amdgpu_device pointer 2251 * 2252 * Main suspend function for hardware IPs. The list of all the hardware 2253 * IPs that make up the asic is walked, clockgating is disabled and the 2254 * suspend callbacks are run. suspend puts the hardware and software state 2255 * in each IP into a state suitable for suspend. 2256 * Returns 0 on success, negative error code on failure. 2257 */ 2258 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) 2259 { 2260 int i, r; 2261 2262 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 2263 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 2264 2265 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2266 if (!adev->ip_blocks[i].status.valid) 2267 continue; 2268 /* displays are handled separately */ 2269 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) { 2270 /* XXX handle errors */ 2271 r = adev->ip_blocks[i].version->funcs->suspend(adev); 2272 /* XXX handle errors */ 2273 if (r) { 2274 DRM_ERROR("suspend of IP block <%s> failed %d\n", 2275 adev->ip_blocks[i].version->funcs->name, r); 2276 return r; 2277 } 2278 adev->ip_blocks[i].status.hw = false; 2279 } 2280 } 2281 2282 return 0; 2283 } 2284 2285 /** 2286 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2) 2287 * 2288 * @adev: amdgpu_device pointer 2289 * 2290 * Main suspend function for hardware IPs. The list of all the hardware 2291 * IPs that make up the asic is walked, clockgating is disabled and the 2292 * suspend callbacks are run. suspend puts the hardware and software state 2293 * in each IP into a state suitable for suspend. 2294 * Returns 0 on success, negative error code on failure. 2295 */ 2296 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) 2297 { 2298 int i, r; 2299 2300 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2301 if (!adev->ip_blocks[i].status.valid) 2302 continue; 2303 /* displays are handled in phase1 */ 2304 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) 2305 continue; 2306 /* PSP lost connection when err_event_athub occurs */ 2307 if (amdgpu_ras_intr_triggered() && 2308 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 2309 adev->ip_blocks[i].status.hw = false; 2310 continue; 2311 } 2312 /* XXX handle errors */ 2313 r = adev->ip_blocks[i].version->funcs->suspend(adev); 2314 /* XXX handle errors */ 2315 if (r) { 2316 DRM_ERROR("suspend of IP block <%s> failed %d\n", 2317 adev->ip_blocks[i].version->funcs->name, r); 2318 } 2319 adev->ip_blocks[i].status.hw = false; 2320 /* handle putting the SMC in the appropriate state */ 2321 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 2322 if (is_support_sw_smu(adev)) { 2323 r = smu_set_mp1_state(&adev->smu, adev->mp1_state); 2324 } else if (adev->powerplay.pp_funcs && 2325 adev->powerplay.pp_funcs->set_mp1_state) { 2326 r = adev->powerplay.pp_funcs->set_mp1_state( 2327 adev->powerplay.pp_handle, 2328 adev->mp1_state); 2329 } 2330 if (r) { 2331 DRM_ERROR("SMC failed to set mp1 state %d, %d\n", 2332 adev->mp1_state, r); 2333 return r; 2334 } 2335 } 2336 2337 adev->ip_blocks[i].status.hw = false; 2338 } 2339 2340 return 0; 2341 } 2342 2343 /** 2344 * amdgpu_device_ip_suspend - run suspend for hardware IPs 2345 * 2346 * @adev: amdgpu_device pointer 2347 * 2348 * Main suspend function for hardware IPs. The list of all the hardware 2349 * IPs that make up the asic is walked, clockgating is disabled and the 2350 * suspend callbacks are run. suspend puts the hardware and software state 2351 * in each IP into a state suitable for suspend. 2352 * Returns 0 on success, negative error code on failure. 2353 */ 2354 int amdgpu_device_ip_suspend(struct amdgpu_device *adev) 2355 { 2356 int r; 2357 2358 if (amdgpu_sriov_vf(adev)) 2359 amdgpu_virt_request_full_gpu(adev, false); 2360 2361 r = amdgpu_device_ip_suspend_phase1(adev); 2362 if (r) 2363 return r; 2364 r = amdgpu_device_ip_suspend_phase2(adev); 2365 2366 if (amdgpu_sriov_vf(adev)) 2367 amdgpu_virt_release_full_gpu(adev, false); 2368 2369 return r; 2370 } 2371 2372 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) 2373 { 2374 int i, r; 2375 2376 static enum amd_ip_block_type ip_order[] = { 2377 AMD_IP_BLOCK_TYPE_GMC, 2378 AMD_IP_BLOCK_TYPE_COMMON, 2379 AMD_IP_BLOCK_TYPE_PSP, 2380 AMD_IP_BLOCK_TYPE_IH, 2381 }; 2382 2383 for (i = 0; i < ARRAY_SIZE(ip_order); i++) { 2384 int j; 2385 struct amdgpu_ip_block *block; 2386 2387 for (j = 0; j < adev->num_ip_blocks; j++) { 2388 block = &adev->ip_blocks[j]; 2389 2390 block->status.hw = false; 2391 if (block->version->type != ip_order[i] || 2392 !block->status.valid) 2393 continue; 2394 2395 r = block->version->funcs->hw_init(adev); 2396 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); 2397 if (r) 2398 return r; 2399 block->status.hw = true; 2400 } 2401 } 2402 2403 return 0; 2404 } 2405 2406 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) 2407 { 2408 int i, r; 2409 2410 static enum amd_ip_block_type ip_order[] = { 2411 AMD_IP_BLOCK_TYPE_SMC, 2412 AMD_IP_BLOCK_TYPE_DCE, 2413 AMD_IP_BLOCK_TYPE_GFX, 2414 AMD_IP_BLOCK_TYPE_SDMA, 2415 AMD_IP_BLOCK_TYPE_UVD, 2416 AMD_IP_BLOCK_TYPE_VCE 2417 }; 2418 2419 for (i = 0; i < ARRAY_SIZE(ip_order); i++) { 2420 int j; 2421 struct amdgpu_ip_block *block; 2422 2423 for (j = 0; j < adev->num_ip_blocks; j++) { 2424 block = &adev->ip_blocks[j]; 2425 2426 if (block->version->type != ip_order[i] || 2427 !block->status.valid || 2428 block->status.hw) 2429 continue; 2430 2431 r = block->version->funcs->hw_init(adev); 2432 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); 2433 if (r) 2434 return r; 2435 block->status.hw = true; 2436 } 2437 } 2438 2439 return 0; 2440 } 2441 2442 /** 2443 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs 2444 * 2445 * @adev: amdgpu_device pointer 2446 * 2447 * First resume function for hardware IPs. The list of all the hardware 2448 * IPs that make up the asic is walked and the resume callbacks are run for 2449 * COMMON, GMC, and IH. resume puts the hardware into a functional state 2450 * after a suspend and updates the software state as necessary. This 2451 * function is also used for restoring the GPU after a GPU reset. 2452 * Returns 0 on success, negative error code on failure. 2453 */ 2454 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) 2455 { 2456 int i, r; 2457 2458 for (i = 0; i < adev->num_ip_blocks; i++) { 2459 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) 2460 continue; 2461 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 2462 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 2463 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { 2464 2465 r = adev->ip_blocks[i].version->funcs->resume(adev); 2466 if (r) { 2467 DRM_ERROR("resume of IP block <%s> failed %d\n", 2468 adev->ip_blocks[i].version->funcs->name, r); 2469 return r; 2470 } 2471 adev->ip_blocks[i].status.hw = true; 2472 } 2473 } 2474 2475 return 0; 2476 } 2477 2478 /** 2479 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs 2480 * 2481 * @adev: amdgpu_device pointer 2482 * 2483 * First resume function for hardware IPs. The list of all the hardware 2484 * IPs that make up the asic is walked and the resume callbacks are run for 2485 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a 2486 * functional state after a suspend and updates the software state as 2487 * necessary. This function is also used for restoring the GPU after a GPU 2488 * reset. 2489 * Returns 0 on success, negative error code on failure. 2490 */ 2491 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) 2492 { 2493 int i, r; 2494 2495 for (i = 0; i < adev->num_ip_blocks; i++) { 2496 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) 2497 continue; 2498 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 2499 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 2500 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || 2501 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) 2502 continue; 2503 r = adev->ip_blocks[i].version->funcs->resume(adev); 2504 if (r) { 2505 DRM_ERROR("resume of IP block <%s> failed %d\n", 2506 adev->ip_blocks[i].version->funcs->name, r); 2507 return r; 2508 } 2509 adev->ip_blocks[i].status.hw = true; 2510 } 2511 2512 return 0; 2513 } 2514 2515 /** 2516 * amdgpu_device_ip_resume - run resume for hardware IPs 2517 * 2518 * @adev: amdgpu_device pointer 2519 * 2520 * Main resume function for hardware IPs. The hardware IPs 2521 * are split into two resume functions because they are 2522 * are also used in in recovering from a GPU reset and some additional 2523 * steps need to be take between them. In this case (S3/S4) they are 2524 * run sequentially. 2525 * Returns 0 on success, negative error code on failure. 2526 */ 2527 static int amdgpu_device_ip_resume(struct amdgpu_device *adev) 2528 { 2529 int r; 2530 2531 r = amdgpu_device_ip_resume_phase1(adev); 2532 if (r) 2533 return r; 2534 2535 r = amdgpu_device_fw_loading(adev); 2536 if (r) 2537 return r; 2538 2539 r = amdgpu_device_ip_resume_phase2(adev); 2540 2541 return r; 2542 } 2543 2544 /** 2545 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV 2546 * 2547 * @adev: amdgpu_device pointer 2548 * 2549 * Query the VBIOS data tables to determine if the board supports SR-IOV. 2550 */ 2551 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) 2552 { 2553 if (amdgpu_sriov_vf(adev)) { 2554 if (adev->is_atom_fw) { 2555 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev)) 2556 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; 2557 } else { 2558 if (amdgpu_atombios_has_gpu_virtualization_table(adev)) 2559 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; 2560 } 2561 2562 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)) 2563 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0); 2564 } 2565 } 2566 2567 /** 2568 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic 2569 * 2570 * @asic_type: AMD asic type 2571 * 2572 * Check if there is DC (new modesetting infrastructre) support for an asic. 2573 * returns true if DC has support, false if not. 2574 */ 2575 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) 2576 { 2577 switch (asic_type) { 2578 #if defined(CONFIG_DRM_AMD_DC) 2579 case CHIP_BONAIRE: 2580 case CHIP_KAVERI: 2581 case CHIP_KABINI: 2582 case CHIP_MULLINS: 2583 /* 2584 * We have systems in the wild with these ASICs that require 2585 * LVDS and VGA support which is not supported with DC. 2586 * 2587 * Fallback to the non-DC driver here by default so as not to 2588 * cause regressions. 2589 */ 2590 return amdgpu_dc > 0; 2591 case CHIP_HAWAII: 2592 case CHIP_CARRIZO: 2593 case CHIP_STONEY: 2594 case CHIP_POLARIS10: 2595 case CHIP_POLARIS11: 2596 case CHIP_POLARIS12: 2597 case CHIP_VEGAM: 2598 case CHIP_TONGA: 2599 case CHIP_FIJI: 2600 case CHIP_VEGA10: 2601 case CHIP_VEGA12: 2602 case CHIP_VEGA20: 2603 #if defined(CONFIG_DRM_AMD_DC_DCN1_0) 2604 case CHIP_RAVEN: 2605 #endif 2606 #if defined(CONFIG_DRM_AMD_DC_DCN2_0) 2607 case CHIP_NAVI10: 2608 case CHIP_NAVI14: 2609 case CHIP_NAVI12: 2610 #endif 2611 #if defined(CONFIG_DRM_AMD_DC_DCN2_1) 2612 case CHIP_RENOIR: 2613 #endif 2614 return amdgpu_dc != 0; 2615 #endif 2616 default: 2617 return false; 2618 } 2619 } 2620 2621 /** 2622 * amdgpu_device_has_dc_support - check if dc is supported 2623 * 2624 * @adev: amdgpu_device_pointer 2625 * 2626 * Returns true for supported, false for not supported 2627 */ 2628 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) 2629 { 2630 if (amdgpu_sriov_vf(adev)) 2631 return false; 2632 2633 return amdgpu_device_asic_has_dc_support(adev->asic_type); 2634 } 2635 2636 2637 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) 2638 { 2639 struct amdgpu_device *adev = 2640 container_of(__work, struct amdgpu_device, xgmi_reset_work); 2641 2642 adev->asic_reset_res = amdgpu_asic_reset(adev); 2643 if (adev->asic_reset_res) 2644 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s", 2645 adev->asic_reset_res, adev->ddev->unique); 2646 } 2647 2648 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) 2649 { 2650 char *input = amdgpu_lockup_timeout; 2651 char *timeout_setting = NULL; 2652 int index = 0; 2653 long timeout; 2654 int ret = 0; 2655 2656 /* 2657 * By default timeout for non compute jobs is 10000. 2658 * And there is no timeout enforced on compute jobs. 2659 * In SR-IOV or passthrough mode, timeout for compute 2660 * jobs are 10000 by default. 2661 */ 2662 adev->gfx_timeout = msecs_to_jiffies(10000); 2663 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; 2664 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) 2665 adev->compute_timeout = adev->gfx_timeout; 2666 else 2667 adev->compute_timeout = MAX_SCHEDULE_TIMEOUT; 2668 2669 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { 2670 while ((timeout_setting = strsep(&input, ",")) && 2671 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { 2672 ret = kstrtol(timeout_setting, 0, &timeout); 2673 if (ret) 2674 return ret; 2675 2676 if (timeout == 0) { 2677 index++; 2678 continue; 2679 } else if (timeout < 0) { 2680 timeout = MAX_SCHEDULE_TIMEOUT; 2681 } else { 2682 timeout = msecs_to_jiffies(timeout); 2683 } 2684 2685 switch (index++) { 2686 case 0: 2687 adev->gfx_timeout = timeout; 2688 break; 2689 case 1: 2690 adev->compute_timeout = timeout; 2691 break; 2692 case 2: 2693 adev->sdma_timeout = timeout; 2694 break; 2695 case 3: 2696 adev->video_timeout = timeout; 2697 break; 2698 default: 2699 break; 2700 } 2701 } 2702 /* 2703 * There is only one value specified and 2704 * it should apply to all non-compute jobs. 2705 */ 2706 if (index == 1) { 2707 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; 2708 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) 2709 adev->compute_timeout = adev->gfx_timeout; 2710 } 2711 } 2712 2713 return ret; 2714 } 2715 2716 /** 2717 * amdgpu_device_init - initialize the driver 2718 * 2719 * @adev: amdgpu_device pointer 2720 * @ddev: drm dev pointer 2721 * @pdev: pci dev pointer 2722 * @flags: driver flags 2723 * 2724 * Initializes the driver info and hw (all asics). 2725 * Returns 0 for success or an error on failure. 2726 * Called at driver startup. 2727 */ 2728 int amdgpu_device_init(struct amdgpu_device *adev, 2729 struct drm_device *ddev, 2730 struct pci_dev *pdev, 2731 uint32_t flags) 2732 { 2733 int r, i; 2734 bool runtime = false; 2735 u32 max_MBps; 2736 2737 adev->shutdown = false; 2738 adev->dev = &pdev->dev; 2739 adev->ddev = ddev; 2740 adev->pdev = pdev; 2741 adev->flags = flags; 2742 2743 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST) 2744 adev->asic_type = amdgpu_force_asic_type; 2745 else 2746 adev->asic_type = flags & AMD_ASIC_MASK; 2747 2748 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; 2749 if (amdgpu_emu_mode == 1) 2750 adev->usec_timeout *= 2; 2751 adev->gmc.gart_size = 512 * 1024 * 1024; 2752 adev->accel_working = false; 2753 adev->num_rings = 0; 2754 adev->mman.buffer_funcs = NULL; 2755 adev->mman.buffer_funcs_ring = NULL; 2756 adev->vm_manager.vm_pte_funcs = NULL; 2757 adev->vm_manager.vm_pte_num_rqs = 0; 2758 adev->gmc.gmc_funcs = NULL; 2759 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); 2760 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 2761 2762 adev->smc_rreg = &amdgpu_invalid_rreg; 2763 adev->smc_wreg = &amdgpu_invalid_wreg; 2764 adev->pcie_rreg = &amdgpu_invalid_rreg; 2765 adev->pcie_wreg = &amdgpu_invalid_wreg; 2766 adev->pciep_rreg = &amdgpu_invalid_rreg; 2767 adev->pciep_wreg = &amdgpu_invalid_wreg; 2768 adev->pcie_rreg64 = &amdgpu_invalid_rreg64; 2769 adev->pcie_wreg64 = &amdgpu_invalid_wreg64; 2770 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; 2771 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; 2772 adev->didt_rreg = &amdgpu_invalid_rreg; 2773 adev->didt_wreg = &amdgpu_invalid_wreg; 2774 adev->gc_cac_rreg = &amdgpu_invalid_rreg; 2775 adev->gc_cac_wreg = &amdgpu_invalid_wreg; 2776 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; 2777 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; 2778 2779 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 2780 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, 2781 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 2782 2783 /* mutex initialization are all done here so we 2784 * can recall function without having locking issues */ 2785 atomic_set(&adev->irq.ih.lock, 0); 2786 mutex_init(&adev->firmware.mutex); 2787 mutex_init(&adev->pm.mutex); 2788 mutex_init(&adev->gfx.gpu_clock_mutex); 2789 mutex_init(&adev->srbm_mutex); 2790 mutex_init(&adev->gfx.pipe_reserve_mutex); 2791 mutex_init(&adev->gfx.gfx_off_mutex); 2792 mutex_init(&adev->grbm_idx_mutex); 2793 mutex_init(&adev->mn_lock); 2794 mutex_init(&adev->virt.vf_errors.lock); 2795 hash_init(adev->mn_hash); 2796 mutex_init(&adev->lock_reset); 2797 mutex_init(&adev->virt.dpm_mutex); 2798 mutex_init(&adev->psp.mutex); 2799 2800 r = amdgpu_device_check_arguments(adev); 2801 if (r) 2802 return r; 2803 2804 spin_lock_init(&adev->mmio_idx_lock); 2805 spin_lock_init(&adev->smc_idx_lock); 2806 spin_lock_init(&adev->pcie_idx_lock); 2807 spin_lock_init(&adev->uvd_ctx_idx_lock); 2808 spin_lock_init(&adev->didt_idx_lock); 2809 spin_lock_init(&adev->gc_cac_idx_lock); 2810 spin_lock_init(&adev->se_cac_idx_lock); 2811 spin_lock_init(&adev->audio_endpt_idx_lock); 2812 spin_lock_init(&adev->mm_stats.lock); 2813 2814 INIT_LIST_HEAD(&adev->shadow_list); 2815 mutex_init(&adev->shadow_list_lock); 2816 2817 INIT_LIST_HEAD(&adev->ring_lru_list); 2818 spin_lock_init(&adev->ring_lru_list_lock); 2819 2820 INIT_DELAYED_WORK(&adev->delayed_init_work, 2821 amdgpu_device_delayed_init_work_handler); 2822 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, 2823 amdgpu_device_delay_enable_gfx_off); 2824 2825 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); 2826 2827 adev->gfx.gfx_off_req_count = 1; 2828 adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false; 2829 2830 /* Registers mapping */ 2831 /* TODO: block userspace mapping of io register */ 2832 if (adev->asic_type >= CHIP_BONAIRE) { 2833 adev->rmmio_base = pci_resource_start(adev->pdev, 5); 2834 adev->rmmio_size = pci_resource_len(adev->pdev, 5); 2835 } else { 2836 adev->rmmio_base = pci_resource_start(adev->pdev, 2); 2837 adev->rmmio_size = pci_resource_len(adev->pdev, 2); 2838 } 2839 2840 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); 2841 if (adev->rmmio == NULL) { 2842 return -ENOMEM; 2843 } 2844 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); 2845 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); 2846 2847 /* io port mapping */ 2848 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 2849 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) { 2850 adev->rio_mem_size = pci_resource_len(adev->pdev, i); 2851 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size); 2852 break; 2853 } 2854 } 2855 if (adev->rio_mem == NULL) 2856 DRM_INFO("PCI I/O BAR is not found.\n"); 2857 2858 /* enable PCIE atomic ops */ 2859 r = pci_enable_atomic_ops_to_root(adev->pdev, 2860 PCI_EXP_DEVCAP2_ATOMIC_COMP32 | 2861 PCI_EXP_DEVCAP2_ATOMIC_COMP64); 2862 if (r) { 2863 adev->have_atomics_support = false; 2864 DRM_INFO("PCIE atomic ops is not supported\n"); 2865 } else { 2866 adev->have_atomics_support = true; 2867 } 2868 2869 amdgpu_device_get_pcie_info(adev); 2870 2871 if (amdgpu_mcbp) 2872 DRM_INFO("MCBP is enabled\n"); 2873 2874 if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10) 2875 adev->enable_mes = true; 2876 2877 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) { 2878 r = amdgpu_discovery_init(adev); 2879 if (r) { 2880 dev_err(adev->dev, "amdgpu_discovery_init failed\n"); 2881 return r; 2882 } 2883 } 2884 2885 /* early init functions */ 2886 r = amdgpu_device_ip_early_init(adev); 2887 if (r) 2888 return r; 2889 2890 r = amdgpu_device_get_job_timeout_settings(adev); 2891 if (r) { 2892 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); 2893 return r; 2894 } 2895 2896 /* doorbell bar mapping and doorbell index init*/ 2897 amdgpu_device_doorbell_init(adev); 2898 2899 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ 2900 /* this will fail for cards that aren't VGA class devices, just 2901 * ignore it */ 2902 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode); 2903 2904 if (amdgpu_device_is_px(ddev)) 2905 runtime = true; 2906 if (!pci_is_thunderbolt_attached(adev->pdev)) 2907 vga_switcheroo_register_client(adev->pdev, 2908 &amdgpu_switcheroo_ops, runtime); 2909 if (runtime) 2910 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); 2911 2912 if (amdgpu_emu_mode == 1) { 2913 /* post the asic on emulation mode */ 2914 emu_soc_asic_init(adev); 2915 goto fence_driver_init; 2916 } 2917 2918 /* detect if we are with an SRIOV vbios */ 2919 amdgpu_device_detect_sriov_bios(adev); 2920 2921 /* check if we need to reset the asic 2922 * E.g., driver was not cleanly unloaded previously, etc. 2923 */ 2924 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) { 2925 r = amdgpu_asic_reset(adev); 2926 if (r) { 2927 dev_err(adev->dev, "asic reset on init failed\n"); 2928 goto failed; 2929 } 2930 } 2931 2932 /* Post card if necessary */ 2933 if (amdgpu_device_need_post(adev)) { 2934 if (!adev->bios) { 2935 dev_err(adev->dev, "no vBIOS found\n"); 2936 r = -EINVAL; 2937 goto failed; 2938 } 2939 DRM_INFO("GPU posting now...\n"); 2940 r = amdgpu_atom_asic_init(adev->mode_info.atom_context); 2941 if (r) { 2942 dev_err(adev->dev, "gpu post error!\n"); 2943 goto failed; 2944 } 2945 } 2946 2947 if (adev->is_atom_fw) { 2948 /* Initialize clocks */ 2949 r = amdgpu_atomfirmware_get_clock_info(adev); 2950 if (r) { 2951 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); 2952 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 2953 goto failed; 2954 } 2955 } else { 2956 /* Initialize clocks */ 2957 r = amdgpu_atombios_get_clock_info(adev); 2958 if (r) { 2959 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); 2960 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 2961 goto failed; 2962 } 2963 /* init i2c buses */ 2964 if (!amdgpu_device_has_dc_support(adev)) 2965 amdgpu_atombios_i2c_init(adev); 2966 } 2967 2968 fence_driver_init: 2969 /* Fence driver */ 2970 r = amdgpu_fence_driver_init(adev); 2971 if (r) { 2972 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n"); 2973 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); 2974 goto failed; 2975 } 2976 2977 /* init the mode config */ 2978 drm_mode_config_init(adev->ddev); 2979 2980 r = amdgpu_device_ip_init(adev); 2981 if (r) { 2982 /* failed in exclusive mode due to timeout */ 2983 if (amdgpu_sriov_vf(adev) && 2984 !amdgpu_sriov_runtime(adev) && 2985 amdgpu_virt_mmio_blocked(adev) && 2986 !amdgpu_virt_wait_reset(adev)) { 2987 dev_err(adev->dev, "VF exclusive mode timeout\n"); 2988 /* Don't send request since VF is inactive. */ 2989 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; 2990 adev->virt.ops = NULL; 2991 r = -EAGAIN; 2992 goto failed; 2993 } 2994 dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); 2995 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); 2996 if (amdgpu_virt_request_full_gpu(adev, false)) 2997 amdgpu_virt_release_full_gpu(adev, false); 2998 goto failed; 2999 } 3000 3001 adev->accel_working = true; 3002 3003 amdgpu_vm_check_compute_bug(adev); 3004 3005 /* Initialize the buffer migration limit. */ 3006 if (amdgpu_moverate >= 0) 3007 max_MBps = amdgpu_moverate; 3008 else 3009 max_MBps = 8; /* Allow 8 MB/s. */ 3010 /* Get a log2 for easy divisions. */ 3011 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); 3012 3013 amdgpu_fbdev_init(adev); 3014 3015 if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)) 3016 amdgpu_pm_virt_sysfs_init(adev); 3017 3018 r = amdgpu_pm_sysfs_init(adev); 3019 if (r) 3020 DRM_ERROR("registering pm debugfs failed (%d).\n", r); 3021 3022 r = amdgpu_ucode_sysfs_init(adev); 3023 if (r) 3024 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r); 3025 3026 r = amdgpu_debugfs_gem_init(adev); 3027 if (r) 3028 DRM_ERROR("registering gem debugfs failed (%d).\n", r); 3029 3030 r = amdgpu_debugfs_regs_init(adev); 3031 if (r) 3032 DRM_ERROR("registering register debugfs failed (%d).\n", r); 3033 3034 r = amdgpu_debugfs_firmware_init(adev); 3035 if (r) 3036 DRM_ERROR("registering firmware debugfs failed (%d).\n", r); 3037 3038 r = amdgpu_debugfs_init(adev); 3039 if (r) 3040 DRM_ERROR("Creating debugfs files failed (%d).\n", r); 3041 3042 if ((amdgpu_testing & 1)) { 3043 if (adev->accel_working) 3044 amdgpu_test_moves(adev); 3045 else 3046 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n"); 3047 } 3048 if (amdgpu_benchmarking) { 3049 if (adev->accel_working) 3050 amdgpu_benchmark(adev, amdgpu_benchmarking); 3051 else 3052 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n"); 3053 } 3054 3055 /* 3056 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost. 3057 * Otherwise the mgpu fan boost feature will be skipped due to the 3058 * gpu instance is counted less. 3059 */ 3060 amdgpu_register_gpu_instance(adev); 3061 3062 /* enable clockgating, etc. after ib tests, etc. since some blocks require 3063 * explicit gating rather than handling it automatically. 3064 */ 3065 r = amdgpu_device_ip_late_init(adev); 3066 if (r) { 3067 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n"); 3068 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); 3069 goto failed; 3070 } 3071 3072 /* must succeed. */ 3073 amdgpu_ras_resume(adev); 3074 3075 queue_delayed_work(system_wq, &adev->delayed_init_work, 3076 msecs_to_jiffies(AMDGPU_RESUME_MS)); 3077 3078 r = device_create_file(adev->dev, &dev_attr_pcie_replay_count); 3079 if (r) { 3080 dev_err(adev->dev, "Could not create pcie_replay_count"); 3081 return r; 3082 } 3083 3084 if (IS_ENABLED(CONFIG_PERF_EVENTS)) 3085 r = amdgpu_pmu_init(adev); 3086 if (r) 3087 dev_err(adev->dev, "amdgpu_pmu_init failed\n"); 3088 3089 return 0; 3090 3091 failed: 3092 amdgpu_vf_error_trans_all(adev); 3093 if (runtime) 3094 vga_switcheroo_fini_domain_pm_ops(adev->dev); 3095 3096 return r; 3097 } 3098 3099 /** 3100 * amdgpu_device_fini - tear down the driver 3101 * 3102 * @adev: amdgpu_device pointer 3103 * 3104 * Tear down the driver info (all asics). 3105 * Called at driver shutdown. 3106 */ 3107 void amdgpu_device_fini(struct amdgpu_device *adev) 3108 { 3109 int r; 3110 3111 DRM_INFO("amdgpu: finishing device.\n"); 3112 adev->shutdown = true; 3113 3114 flush_delayed_work(&adev->delayed_init_work); 3115 3116 /* disable all interrupts */ 3117 amdgpu_irq_disable_all(adev); 3118 if (adev->mode_info.mode_config_initialized){ 3119 if (!amdgpu_device_has_dc_support(adev)) 3120 drm_helper_force_disable_all(adev->ddev); 3121 else 3122 drm_atomic_helper_shutdown(adev->ddev); 3123 } 3124 amdgpu_fence_driver_fini(adev); 3125 amdgpu_pm_sysfs_fini(adev); 3126 amdgpu_fbdev_fini(adev); 3127 r = amdgpu_device_ip_fini(adev); 3128 if (adev->firmware.gpu_info_fw) { 3129 release_firmware(adev->firmware.gpu_info_fw); 3130 adev->firmware.gpu_info_fw = NULL; 3131 } 3132 adev->accel_working = false; 3133 cancel_delayed_work_sync(&adev->delayed_init_work); 3134 /* free i2c buses */ 3135 if (!amdgpu_device_has_dc_support(adev)) 3136 amdgpu_i2c_fini(adev); 3137 3138 if (amdgpu_emu_mode != 1) 3139 amdgpu_atombios_fini(adev); 3140 3141 kfree(adev->bios); 3142 adev->bios = NULL; 3143 if (!pci_is_thunderbolt_attached(adev->pdev)) 3144 vga_switcheroo_unregister_client(adev->pdev); 3145 if (adev->flags & AMD_IS_PX) 3146 vga_switcheroo_fini_domain_pm_ops(adev->dev); 3147 vga_client_register(adev->pdev, NULL, NULL, NULL); 3148 if (adev->rio_mem) 3149 pci_iounmap(adev->pdev, adev->rio_mem); 3150 adev->rio_mem = NULL; 3151 iounmap(adev->rmmio); 3152 adev->rmmio = NULL; 3153 amdgpu_device_doorbell_fini(adev); 3154 if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)) 3155 amdgpu_pm_virt_sysfs_fini(adev); 3156 3157 amdgpu_debugfs_regs_cleanup(adev); 3158 device_remove_file(adev->dev, &dev_attr_pcie_replay_count); 3159 amdgpu_ucode_sysfs_fini(adev); 3160 if (IS_ENABLED(CONFIG_PERF_EVENTS)) 3161 amdgpu_pmu_fini(adev); 3162 amdgpu_debugfs_preempt_cleanup(adev); 3163 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) 3164 amdgpu_discovery_fini(adev); 3165 } 3166 3167 3168 /* 3169 * Suspend & resume. 3170 */ 3171 /** 3172 * amdgpu_device_suspend - initiate device suspend 3173 * 3174 * @dev: drm dev pointer 3175 * @suspend: suspend state 3176 * @fbcon : notify the fbdev of suspend 3177 * 3178 * Puts the hw in the suspend state (all asics). 3179 * Returns 0 for success or an error on failure. 3180 * Called at driver suspend. 3181 */ 3182 int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) 3183 { 3184 struct amdgpu_device *adev; 3185 struct drm_crtc *crtc; 3186 struct drm_connector *connector; 3187 struct drm_connector_list_iter iter; 3188 int r; 3189 3190 if (dev == NULL || dev->dev_private == NULL) { 3191 return -ENODEV; 3192 } 3193 3194 adev = dev->dev_private; 3195 3196 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 3197 return 0; 3198 3199 adev->in_suspend = true; 3200 drm_kms_helper_poll_disable(dev); 3201 3202 if (fbcon) 3203 amdgpu_fbdev_set_suspend(adev, 1); 3204 3205 cancel_delayed_work_sync(&adev->delayed_init_work); 3206 3207 if (!amdgpu_device_has_dc_support(adev)) { 3208 /* turn off display hw */ 3209 drm_modeset_lock_all(dev); 3210 drm_connector_list_iter_begin(dev, &iter); 3211 drm_for_each_connector_iter(connector, &iter) 3212 drm_helper_connector_dpms(connector, 3213 DRM_MODE_DPMS_OFF); 3214 drm_connector_list_iter_end(&iter); 3215 drm_modeset_unlock_all(dev); 3216 /* unpin the front buffers and cursors */ 3217 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 3218 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 3219 struct drm_framebuffer *fb = crtc->primary->fb; 3220 struct amdgpu_bo *robj; 3221 3222 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { 3223 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 3224 r = amdgpu_bo_reserve(aobj, true); 3225 if (r == 0) { 3226 amdgpu_bo_unpin(aobj); 3227 amdgpu_bo_unreserve(aobj); 3228 } 3229 } 3230 3231 if (fb == NULL || fb->obj[0] == NULL) { 3232 continue; 3233 } 3234 robj = gem_to_amdgpu_bo(fb->obj[0]); 3235 /* don't unpin kernel fb objects */ 3236 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { 3237 r = amdgpu_bo_reserve(robj, true); 3238 if (r == 0) { 3239 amdgpu_bo_unpin(robj); 3240 amdgpu_bo_unreserve(robj); 3241 } 3242 } 3243 } 3244 } 3245 3246 amdgpu_amdkfd_suspend(adev); 3247 3248 amdgpu_ras_suspend(adev); 3249 3250 r = amdgpu_device_ip_suspend_phase1(adev); 3251 3252 /* evict vram memory */ 3253 amdgpu_bo_evict_vram(adev); 3254 3255 amdgpu_fence_driver_suspend(adev); 3256 3257 r = amdgpu_device_ip_suspend_phase2(adev); 3258 3259 /* evict remaining vram memory 3260 * This second call to evict vram is to evict the gart page table 3261 * using the CPU. 3262 */ 3263 amdgpu_bo_evict_vram(adev); 3264 3265 if (suspend) { 3266 pci_save_state(dev->pdev); 3267 /* Shut down the device */ 3268 pci_disable_device(dev->pdev); 3269 pci_set_power_state(dev->pdev, PCI_D3hot); 3270 } 3271 3272 return 0; 3273 } 3274 3275 /** 3276 * amdgpu_device_resume - initiate device resume 3277 * 3278 * @dev: drm dev pointer 3279 * @resume: resume state 3280 * @fbcon : notify the fbdev of resume 3281 * 3282 * Bring the hw back to operating state (all asics). 3283 * Returns 0 for success or an error on failure. 3284 * Called at driver resume. 3285 */ 3286 int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) 3287 { 3288 struct drm_connector *connector; 3289 struct drm_connector_list_iter iter; 3290 struct amdgpu_device *adev = dev->dev_private; 3291 struct drm_crtc *crtc; 3292 int r = 0; 3293 3294 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 3295 return 0; 3296 3297 if (resume) { 3298 pci_set_power_state(dev->pdev, PCI_D0); 3299 pci_restore_state(dev->pdev); 3300 r = pci_enable_device(dev->pdev); 3301 if (r) 3302 return r; 3303 } 3304 3305 /* post card */ 3306 if (amdgpu_device_need_post(adev)) { 3307 r = amdgpu_atom_asic_init(adev->mode_info.atom_context); 3308 if (r) 3309 DRM_ERROR("amdgpu asic init failed\n"); 3310 } 3311 3312 r = amdgpu_device_ip_resume(adev); 3313 if (r) { 3314 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r); 3315 return r; 3316 } 3317 amdgpu_fence_driver_resume(adev); 3318 3319 3320 r = amdgpu_device_ip_late_init(adev); 3321 if (r) 3322 return r; 3323 3324 queue_delayed_work(system_wq, &adev->delayed_init_work, 3325 msecs_to_jiffies(AMDGPU_RESUME_MS)); 3326 3327 if (!amdgpu_device_has_dc_support(adev)) { 3328 /* pin cursors */ 3329 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 3330 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 3331 3332 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { 3333 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 3334 r = amdgpu_bo_reserve(aobj, true); 3335 if (r == 0) { 3336 r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); 3337 if (r != 0) 3338 DRM_ERROR("Failed to pin cursor BO (%d)\n", r); 3339 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); 3340 amdgpu_bo_unreserve(aobj); 3341 } 3342 } 3343 } 3344 } 3345 r = amdgpu_amdkfd_resume(adev); 3346 if (r) 3347 return r; 3348 3349 /* Make sure IB tests flushed */ 3350 flush_delayed_work(&adev->delayed_init_work); 3351 3352 /* blat the mode back in */ 3353 if (fbcon) { 3354 if (!amdgpu_device_has_dc_support(adev)) { 3355 /* pre DCE11 */ 3356 drm_helper_resume_force_mode(dev); 3357 3358 /* turn on display hw */ 3359 drm_modeset_lock_all(dev); 3360 3361 drm_connector_list_iter_begin(dev, &iter); 3362 drm_for_each_connector_iter(connector, &iter) 3363 drm_helper_connector_dpms(connector, 3364 DRM_MODE_DPMS_ON); 3365 drm_connector_list_iter_end(&iter); 3366 3367 drm_modeset_unlock_all(dev); 3368 } 3369 amdgpu_fbdev_set_suspend(adev, 0); 3370 } 3371 3372 drm_kms_helper_poll_enable(dev); 3373 3374 amdgpu_ras_resume(adev); 3375 3376 /* 3377 * Most of the connector probing functions try to acquire runtime pm 3378 * refs to ensure that the GPU is powered on when connector polling is 3379 * performed. Since we're calling this from a runtime PM callback, 3380 * trying to acquire rpm refs will cause us to deadlock. 3381 * 3382 * Since we're guaranteed to be holding the rpm lock, it's safe to 3383 * temporarily disable the rpm helpers so this doesn't deadlock us. 3384 */ 3385 #ifdef CONFIG_PM 3386 dev->dev->power.disable_depth++; 3387 #endif 3388 if (!amdgpu_device_has_dc_support(adev)) 3389 drm_helper_hpd_irq_event(dev); 3390 else 3391 drm_kms_helper_hotplug_event(dev); 3392 #ifdef CONFIG_PM 3393 dev->dev->power.disable_depth--; 3394 #endif 3395 adev->in_suspend = false; 3396 3397 return 0; 3398 } 3399 3400 /** 3401 * amdgpu_device_ip_check_soft_reset - did soft reset succeed 3402 * 3403 * @adev: amdgpu_device pointer 3404 * 3405 * The list of all the hardware IPs that make up the asic is walked and 3406 * the check_soft_reset callbacks are run. check_soft_reset determines 3407 * if the asic is still hung or not. 3408 * Returns true if any of the IPs are still in a hung state, false if not. 3409 */ 3410 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) 3411 { 3412 int i; 3413 bool asic_hang = false; 3414 3415 if (amdgpu_sriov_vf(adev)) 3416 return true; 3417 3418 if (amdgpu_asic_need_full_reset(adev)) 3419 return true; 3420 3421 for (i = 0; i < adev->num_ip_blocks; i++) { 3422 if (!adev->ip_blocks[i].status.valid) 3423 continue; 3424 if (adev->ip_blocks[i].version->funcs->check_soft_reset) 3425 adev->ip_blocks[i].status.hang = 3426 adev->ip_blocks[i].version->funcs->check_soft_reset(adev); 3427 if (adev->ip_blocks[i].status.hang) { 3428 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); 3429 asic_hang = true; 3430 } 3431 } 3432 return asic_hang; 3433 } 3434 3435 /** 3436 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset 3437 * 3438 * @adev: amdgpu_device pointer 3439 * 3440 * The list of all the hardware IPs that make up the asic is walked and the 3441 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset 3442 * handles any IP specific hardware or software state changes that are 3443 * necessary for a soft reset to succeed. 3444 * Returns 0 on success, negative error code on failure. 3445 */ 3446 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev) 3447 { 3448 int i, r = 0; 3449 3450 for (i = 0; i < adev->num_ip_blocks; i++) { 3451 if (!adev->ip_blocks[i].status.valid) 3452 continue; 3453 if (adev->ip_blocks[i].status.hang && 3454 adev->ip_blocks[i].version->funcs->pre_soft_reset) { 3455 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); 3456 if (r) 3457 return r; 3458 } 3459 } 3460 3461 return 0; 3462 } 3463 3464 /** 3465 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed 3466 * 3467 * @adev: amdgpu_device pointer 3468 * 3469 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu 3470 * reset is necessary to recover. 3471 * Returns true if a full asic reset is required, false if not. 3472 */ 3473 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev) 3474 { 3475 int i; 3476 3477 if (amdgpu_asic_need_full_reset(adev)) 3478 return true; 3479 3480 for (i = 0; i < adev->num_ip_blocks; i++) { 3481 if (!adev->ip_blocks[i].status.valid) 3482 continue; 3483 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || 3484 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || 3485 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || 3486 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || 3487 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 3488 if (adev->ip_blocks[i].status.hang) { 3489 DRM_INFO("Some block need full reset!\n"); 3490 return true; 3491 } 3492 } 3493 } 3494 return false; 3495 } 3496 3497 /** 3498 * amdgpu_device_ip_soft_reset - do a soft reset 3499 * 3500 * @adev: amdgpu_device pointer 3501 * 3502 * The list of all the hardware IPs that make up the asic is walked and the 3503 * soft_reset callbacks are run if the block is hung. soft_reset handles any 3504 * IP specific hardware or software state changes that are necessary to soft 3505 * reset the IP. 3506 * Returns 0 on success, negative error code on failure. 3507 */ 3508 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev) 3509 { 3510 int i, r = 0; 3511 3512 for (i = 0; i < adev->num_ip_blocks; i++) { 3513 if (!adev->ip_blocks[i].status.valid) 3514 continue; 3515 if (adev->ip_blocks[i].status.hang && 3516 adev->ip_blocks[i].version->funcs->soft_reset) { 3517 r = adev->ip_blocks[i].version->funcs->soft_reset(adev); 3518 if (r) 3519 return r; 3520 } 3521 } 3522 3523 return 0; 3524 } 3525 3526 /** 3527 * amdgpu_device_ip_post_soft_reset - clean up from soft reset 3528 * 3529 * @adev: amdgpu_device pointer 3530 * 3531 * The list of all the hardware IPs that make up the asic is walked and the 3532 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset 3533 * handles any IP specific hardware or software state changes that are 3534 * necessary after the IP has been soft reset. 3535 * Returns 0 on success, negative error code on failure. 3536 */ 3537 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) 3538 { 3539 int i, r = 0; 3540 3541 for (i = 0; i < adev->num_ip_blocks; i++) { 3542 if (!adev->ip_blocks[i].status.valid) 3543 continue; 3544 if (adev->ip_blocks[i].status.hang && 3545 adev->ip_blocks[i].version->funcs->post_soft_reset) 3546 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); 3547 if (r) 3548 return r; 3549 } 3550 3551 return 0; 3552 } 3553 3554 /** 3555 * amdgpu_device_recover_vram - Recover some VRAM contents 3556 * 3557 * @adev: amdgpu_device pointer 3558 * 3559 * Restores the contents of VRAM buffers from the shadows in GTT. Used to 3560 * restore things like GPUVM page tables after a GPU reset where 3561 * the contents of VRAM might be lost. 3562 * 3563 * Returns: 3564 * 0 on success, negative error code on failure. 3565 */ 3566 static int amdgpu_device_recover_vram(struct amdgpu_device *adev) 3567 { 3568 struct dma_fence *fence = NULL, *next = NULL; 3569 struct amdgpu_bo *shadow; 3570 long r = 1, tmo; 3571 3572 if (amdgpu_sriov_runtime(adev)) 3573 tmo = msecs_to_jiffies(8000); 3574 else 3575 tmo = msecs_to_jiffies(100); 3576 3577 DRM_INFO("recover vram bo from shadow start\n"); 3578 mutex_lock(&adev->shadow_list_lock); 3579 list_for_each_entry(shadow, &adev->shadow_list, shadow_list) { 3580 3581 /* No need to recover an evicted BO */ 3582 if (shadow->tbo.mem.mem_type != TTM_PL_TT || 3583 shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET || 3584 shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM) 3585 continue; 3586 3587 r = amdgpu_bo_restore_shadow(shadow, &next); 3588 if (r) 3589 break; 3590 3591 if (fence) { 3592 tmo = dma_fence_wait_timeout(fence, false, tmo); 3593 dma_fence_put(fence); 3594 fence = next; 3595 if (tmo == 0) { 3596 r = -ETIMEDOUT; 3597 break; 3598 } else if (tmo < 0) { 3599 r = tmo; 3600 break; 3601 } 3602 } else { 3603 fence = next; 3604 } 3605 } 3606 mutex_unlock(&adev->shadow_list_lock); 3607 3608 if (fence) 3609 tmo = dma_fence_wait_timeout(fence, false, tmo); 3610 dma_fence_put(fence); 3611 3612 if (r < 0 || tmo <= 0) { 3613 DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); 3614 return -EIO; 3615 } 3616 3617 DRM_INFO("recover vram bo from shadow done\n"); 3618 return 0; 3619 } 3620 3621 3622 /** 3623 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf 3624 * 3625 * @adev: amdgpu device pointer 3626 * @from_hypervisor: request from hypervisor 3627 * 3628 * do VF FLR and reinitialize Asic 3629 * return 0 means succeeded otherwise failed 3630 */ 3631 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, 3632 bool from_hypervisor) 3633 { 3634 int r; 3635 3636 if (from_hypervisor) 3637 r = amdgpu_virt_request_full_gpu(adev, true); 3638 else 3639 r = amdgpu_virt_reset_gpu(adev); 3640 if (r) 3641 return r; 3642 3643 amdgpu_amdkfd_pre_reset(adev); 3644 3645 /* Resume IP prior to SMC */ 3646 r = amdgpu_device_ip_reinit_early_sriov(adev); 3647 if (r) 3648 goto error; 3649 3650 /* we need recover gart prior to run SMC/CP/SDMA resume */ 3651 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]); 3652 3653 r = amdgpu_device_fw_loading(adev); 3654 if (r) 3655 return r; 3656 3657 /* now we are okay to resume SMC/CP/SDMA */ 3658 r = amdgpu_device_ip_reinit_late_sriov(adev); 3659 if (r) 3660 goto error; 3661 3662 amdgpu_irq_gpu_reset_resume_helper(adev); 3663 r = amdgpu_ib_ring_tests(adev); 3664 amdgpu_amdkfd_post_reset(adev); 3665 3666 error: 3667 amdgpu_virt_init_data_exchange(adev); 3668 amdgpu_virt_release_full_gpu(adev, true); 3669 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { 3670 amdgpu_inc_vram_lost(adev); 3671 r = amdgpu_device_recover_vram(adev); 3672 } 3673 3674 return r; 3675 } 3676 3677 /** 3678 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery 3679 * 3680 * @adev: amdgpu device pointer 3681 * 3682 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover 3683 * a hung GPU. 3684 */ 3685 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) 3686 { 3687 if (!amdgpu_device_ip_check_soft_reset(adev)) { 3688 DRM_INFO("Timeout, but no hardware hang detected.\n"); 3689 return false; 3690 } 3691 3692 if (amdgpu_gpu_recovery == 0) 3693 goto disabled; 3694 3695 if (amdgpu_sriov_vf(adev)) 3696 return true; 3697 3698 if (amdgpu_gpu_recovery == -1) { 3699 switch (adev->asic_type) { 3700 case CHIP_BONAIRE: 3701 case CHIP_HAWAII: 3702 case CHIP_TOPAZ: 3703 case CHIP_TONGA: 3704 case CHIP_FIJI: 3705 case CHIP_POLARIS10: 3706 case CHIP_POLARIS11: 3707 case CHIP_POLARIS12: 3708 case CHIP_VEGAM: 3709 case CHIP_VEGA20: 3710 case CHIP_VEGA10: 3711 case CHIP_VEGA12: 3712 case CHIP_RAVEN: 3713 break; 3714 default: 3715 goto disabled; 3716 } 3717 } 3718 3719 return true; 3720 3721 disabled: 3722 DRM_INFO("GPU recovery disabled.\n"); 3723 return false; 3724 } 3725 3726 3727 static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, 3728 struct amdgpu_job *job, 3729 bool *need_full_reset_arg) 3730 { 3731 int i, r = 0; 3732 bool need_full_reset = *need_full_reset_arg; 3733 3734 /* block all schedulers and reset given job's ring */ 3735 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 3736 struct amdgpu_ring *ring = adev->rings[i]; 3737 3738 if (!ring || !ring->sched.thread) 3739 continue; 3740 3741 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ 3742 amdgpu_fence_driver_force_completion(ring); 3743 } 3744 3745 if(job) 3746 drm_sched_increase_karma(&job->base); 3747 3748 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */ 3749 if (!amdgpu_sriov_vf(adev)) { 3750 3751 if (!need_full_reset) 3752 need_full_reset = amdgpu_device_ip_need_full_reset(adev); 3753 3754 if (!need_full_reset) { 3755 amdgpu_device_ip_pre_soft_reset(adev); 3756 r = amdgpu_device_ip_soft_reset(adev); 3757 amdgpu_device_ip_post_soft_reset(adev); 3758 if (r || amdgpu_device_ip_check_soft_reset(adev)) { 3759 DRM_INFO("soft reset failed, will fallback to full reset!\n"); 3760 need_full_reset = true; 3761 } 3762 } 3763 3764 if (need_full_reset) 3765 r = amdgpu_device_ip_suspend(adev); 3766 3767 *need_full_reset_arg = need_full_reset; 3768 } 3769 3770 return r; 3771 } 3772 3773 static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, 3774 struct list_head *device_list_handle, 3775 bool *need_full_reset_arg) 3776 { 3777 struct amdgpu_device *tmp_adev = NULL; 3778 bool need_full_reset = *need_full_reset_arg, vram_lost = false; 3779 int r = 0; 3780 3781 /* 3782 * ASIC reset has to be done on all HGMI hive nodes ASAP 3783 * to allow proper links negotiation in FW (within 1 sec) 3784 */ 3785 if (need_full_reset) { 3786 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { 3787 /* For XGMI run all resets in parallel to speed up the process */ 3788 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { 3789 if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work)) 3790 r = -EALREADY; 3791 } else 3792 r = amdgpu_asic_reset(tmp_adev); 3793 3794 if (r) { 3795 DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s", 3796 r, tmp_adev->ddev->unique); 3797 break; 3798 } 3799 } 3800 3801 /* For XGMI wait for all PSP resets to complete before proceed */ 3802 if (!r) { 3803 list_for_each_entry(tmp_adev, device_list_handle, 3804 gmc.xgmi.head) { 3805 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { 3806 flush_work(&tmp_adev->xgmi_reset_work); 3807 r = tmp_adev->asic_reset_res; 3808 if (r) 3809 break; 3810 } 3811 } 3812 } 3813 } 3814 3815 3816 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { 3817 if (need_full_reset) { 3818 /* post card */ 3819 if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context)) 3820 DRM_WARN("asic atom init failed!"); 3821 3822 if (!r) { 3823 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); 3824 r = amdgpu_device_ip_resume_phase1(tmp_adev); 3825 if (r) 3826 goto out; 3827 3828 vram_lost = amdgpu_device_check_vram_lost(tmp_adev); 3829 if (vram_lost) { 3830 DRM_INFO("VRAM is lost due to GPU reset!\n"); 3831 amdgpu_inc_vram_lost(tmp_adev); 3832 } 3833 3834 r = amdgpu_gtt_mgr_recover( 3835 &tmp_adev->mman.bdev.man[TTM_PL_TT]); 3836 if (r) 3837 goto out; 3838 3839 r = amdgpu_device_fw_loading(tmp_adev); 3840 if (r) 3841 return r; 3842 3843 r = amdgpu_device_ip_resume_phase2(tmp_adev); 3844 if (r) 3845 goto out; 3846 3847 if (vram_lost) 3848 amdgpu_device_fill_reset_magic(tmp_adev); 3849 3850 /* 3851 * Add this ASIC as tracked as reset was already 3852 * complete successfully. 3853 */ 3854 amdgpu_register_gpu_instance(tmp_adev); 3855 3856 r = amdgpu_device_ip_late_init(tmp_adev); 3857 if (r) 3858 goto out; 3859 3860 /* must succeed. */ 3861 amdgpu_ras_resume(tmp_adev); 3862 3863 /* Update PSP FW topology after reset */ 3864 if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1) 3865 r = amdgpu_xgmi_update_topology(hive, tmp_adev); 3866 } 3867 } 3868 3869 3870 out: 3871 if (!r) { 3872 amdgpu_irq_gpu_reset_resume_helper(tmp_adev); 3873 r = amdgpu_ib_ring_tests(tmp_adev); 3874 if (r) { 3875 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r); 3876 r = amdgpu_device_ip_suspend(tmp_adev); 3877 need_full_reset = true; 3878 r = -EAGAIN; 3879 goto end; 3880 } 3881 } 3882 3883 if (!r) 3884 r = amdgpu_device_recover_vram(tmp_adev); 3885 else 3886 tmp_adev->asic_reset_res = r; 3887 } 3888 3889 end: 3890 *need_full_reset_arg = need_full_reset; 3891 return r; 3892 } 3893 3894 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock) 3895 { 3896 if (trylock) { 3897 if (!mutex_trylock(&adev->lock_reset)) 3898 return false; 3899 } else 3900 mutex_lock(&adev->lock_reset); 3901 3902 atomic_inc(&adev->gpu_reset_counter); 3903 adev->in_gpu_reset = 1; 3904 switch (amdgpu_asic_reset_method(adev)) { 3905 case AMD_RESET_METHOD_MODE1: 3906 adev->mp1_state = PP_MP1_STATE_SHUTDOWN; 3907 break; 3908 case AMD_RESET_METHOD_MODE2: 3909 adev->mp1_state = PP_MP1_STATE_RESET; 3910 break; 3911 default: 3912 adev->mp1_state = PP_MP1_STATE_NONE; 3913 break; 3914 } 3915 3916 return true; 3917 } 3918 3919 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev) 3920 { 3921 amdgpu_vf_error_trans_all(adev); 3922 adev->mp1_state = PP_MP1_STATE_NONE; 3923 adev->in_gpu_reset = 0; 3924 mutex_unlock(&adev->lock_reset); 3925 } 3926 3927 /** 3928 * amdgpu_device_gpu_recover - reset the asic and recover scheduler 3929 * 3930 * @adev: amdgpu device pointer 3931 * @job: which job trigger hang 3932 * 3933 * Attempt to reset the GPU if it has hung (all asics). 3934 * Attempt to do soft-reset or full-reset and reinitialize Asic 3935 * Returns 0 for success or an error on failure. 3936 */ 3937 3938 int amdgpu_device_gpu_recover(struct amdgpu_device *adev, 3939 struct amdgpu_job *job) 3940 { 3941 struct list_head device_list, *device_list_handle = NULL; 3942 bool need_full_reset, job_signaled; 3943 struct amdgpu_hive_info *hive = NULL; 3944 struct amdgpu_device *tmp_adev = NULL; 3945 int i, r = 0; 3946 bool in_ras_intr = amdgpu_ras_intr_triggered(); 3947 3948 /* 3949 * Flush RAM to disk so that after reboot 3950 * the user can read log and see why the system rebooted. 3951 */ 3952 if (in_ras_intr && amdgpu_ras_get_context(adev)->reboot) { 3953 3954 DRM_WARN("Emergency reboot."); 3955 3956 ksys_sync_helper(); 3957 emergency_restart(); 3958 } 3959 3960 need_full_reset = job_signaled = false; 3961 INIT_LIST_HEAD(&device_list); 3962 3963 dev_info(adev->dev, "GPU %s begin!\n", in_ras_intr ? "jobs stop":"reset"); 3964 3965 cancel_delayed_work_sync(&adev->delayed_init_work); 3966 3967 hive = amdgpu_get_xgmi_hive(adev, false); 3968 3969 /* 3970 * Here we trylock to avoid chain of resets executing from 3971 * either trigger by jobs on different adevs in XGMI hive or jobs on 3972 * different schedulers for same device while this TO handler is running. 3973 * We always reset all schedulers for device and all devices for XGMI 3974 * hive so that should take care of them too. 3975 */ 3976 3977 if (hive && !mutex_trylock(&hive->reset_lock)) { 3978 DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress", 3979 job ? job->base.id : -1, hive->hive_id); 3980 return 0; 3981 } 3982 3983 /* Start with adev pre asic reset first for soft reset check.*/ 3984 if (!amdgpu_device_lock_adev(adev, !hive)) { 3985 DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress", 3986 job ? job->base.id : -1); 3987 return 0; 3988 } 3989 3990 /* Block kfd: SRIOV would do it separately */ 3991 if (!amdgpu_sriov_vf(adev)) 3992 amdgpu_amdkfd_pre_reset(adev); 3993 3994 /* Build list of devices to reset */ 3995 if (adev->gmc.xgmi.num_physical_nodes > 1) { 3996 if (!hive) { 3997 /*unlock kfd: SRIOV would do it separately */ 3998 if (!amdgpu_sriov_vf(adev)) 3999 amdgpu_amdkfd_post_reset(adev); 4000 amdgpu_device_unlock_adev(adev); 4001 return -ENODEV; 4002 } 4003 4004 /* 4005 * In case we are in XGMI hive mode device reset is done for all the 4006 * nodes in the hive to retrain all XGMI links and hence the reset 4007 * sequence is executed in loop on all nodes. 4008 */ 4009 device_list_handle = &hive->device_list; 4010 } else { 4011 list_add_tail(&adev->gmc.xgmi.head, &device_list); 4012 device_list_handle = &device_list; 4013 } 4014 4015 /* block all schedulers and reset given job's ring */ 4016 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { 4017 if (tmp_adev != adev) { 4018 amdgpu_device_lock_adev(tmp_adev, false); 4019 if (!amdgpu_sriov_vf(tmp_adev)) 4020 amdgpu_amdkfd_pre_reset(tmp_adev); 4021 } 4022 4023 /* 4024 * Mark these ASICs to be reseted as untracked first 4025 * And add them back after reset completed 4026 */ 4027 amdgpu_unregister_gpu_instance(tmp_adev); 4028 4029 /* disable ras on ALL IPs */ 4030 if (!in_ras_intr && amdgpu_device_ip_need_full_reset(tmp_adev)) 4031 amdgpu_ras_suspend(tmp_adev); 4032 4033 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 4034 struct amdgpu_ring *ring = tmp_adev->rings[i]; 4035 4036 if (!ring || !ring->sched.thread) 4037 continue; 4038 4039 drm_sched_stop(&ring->sched, job ? &job->base : NULL); 4040 4041 if (in_ras_intr) 4042 amdgpu_job_stop_all_jobs_on_sched(&ring->sched); 4043 } 4044 } 4045 4046 4047 if (in_ras_intr) 4048 goto skip_sched_resume; 4049 4050 /* 4051 * Must check guilty signal here since after this point all old 4052 * HW fences are force signaled. 4053 * 4054 * job->base holds a reference to parent fence 4055 */ 4056 if (job && job->base.s_fence->parent && 4057 dma_fence_is_signaled(job->base.s_fence->parent)) 4058 job_signaled = true; 4059 4060 if (job_signaled) { 4061 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); 4062 goto skip_hw_reset; 4063 } 4064 4065 4066 /* Guilty job will be freed after this*/ 4067 r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset); 4068 if (r) { 4069 /*TODO Should we stop ?*/ 4070 DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ", 4071 r, adev->ddev->unique); 4072 adev->asic_reset_res = r; 4073 } 4074 4075 retry: /* Rest of adevs pre asic reset from XGMI hive. */ 4076 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { 4077 4078 if (tmp_adev == adev) 4079 continue; 4080 4081 r = amdgpu_device_pre_asic_reset(tmp_adev, 4082 NULL, 4083 &need_full_reset); 4084 /*TODO Should we stop ?*/ 4085 if (r) { 4086 DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ", 4087 r, tmp_adev->ddev->unique); 4088 tmp_adev->asic_reset_res = r; 4089 } 4090 } 4091 4092 /* Actual ASIC resets if needed.*/ 4093 /* TODO Implement XGMI hive reset logic for SRIOV */ 4094 if (amdgpu_sriov_vf(adev)) { 4095 r = amdgpu_device_reset_sriov(adev, job ? false : true); 4096 if (r) 4097 adev->asic_reset_res = r; 4098 } else { 4099 r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset); 4100 if (r && r == -EAGAIN) 4101 goto retry; 4102 } 4103 4104 skip_hw_reset: 4105 4106 /* Post ASIC reset for all devs .*/ 4107 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { 4108 4109 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 4110 struct amdgpu_ring *ring = tmp_adev->rings[i]; 4111 4112 if (!ring || !ring->sched.thread) 4113 continue; 4114 4115 /* No point to resubmit jobs if we didn't HW reset*/ 4116 if (!tmp_adev->asic_reset_res && !job_signaled) 4117 drm_sched_resubmit_jobs(&ring->sched); 4118 4119 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res); 4120 } 4121 4122 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) { 4123 drm_helper_resume_force_mode(tmp_adev->ddev); 4124 } 4125 4126 tmp_adev->asic_reset_res = 0; 4127 4128 if (r) { 4129 /* bad news, how to tell it to userspace ? */ 4130 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter)); 4131 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); 4132 } else { 4133 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); 4134 } 4135 } 4136 4137 skip_sched_resume: 4138 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { 4139 /*unlock kfd: SRIOV would do it separately */ 4140 if (!in_ras_intr && !amdgpu_sriov_vf(tmp_adev)) 4141 amdgpu_amdkfd_post_reset(tmp_adev); 4142 amdgpu_device_unlock_adev(tmp_adev); 4143 } 4144 4145 if (hive) 4146 mutex_unlock(&hive->reset_lock); 4147 4148 if (r) 4149 dev_info(adev->dev, "GPU reset end with ret = %d\n", r); 4150 return r; 4151 } 4152 4153 /** 4154 * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot 4155 * 4156 * @adev: amdgpu_device pointer 4157 * 4158 * Fetchs and stores in the driver the PCIE capabilities (gen speed 4159 * and lanes) of the slot the device is in. Handles APUs and 4160 * virtualized environments where PCIE config space may not be available. 4161 */ 4162 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) 4163 { 4164 struct pci_dev *pdev; 4165 enum pci_bus_speed speed_cap, platform_speed_cap; 4166 enum pcie_link_width platform_link_width; 4167 4168 if (amdgpu_pcie_gen_cap) 4169 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap; 4170 4171 if (amdgpu_pcie_lane_cap) 4172 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap; 4173 4174 /* covers APUs as well */ 4175 if (pci_is_root_bus(adev->pdev->bus)) { 4176 if (adev->pm.pcie_gen_mask == 0) 4177 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; 4178 if (adev->pm.pcie_mlw_mask == 0) 4179 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; 4180 return; 4181 } 4182 4183 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask) 4184 return; 4185 4186 pcie_bandwidth_available(adev->pdev, NULL, 4187 &platform_speed_cap, &platform_link_width); 4188 4189 if (adev->pm.pcie_gen_mask == 0) { 4190 /* asic caps */ 4191 pdev = adev->pdev; 4192 speed_cap = pcie_get_speed_cap(pdev); 4193 if (speed_cap == PCI_SPEED_UNKNOWN) { 4194 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 4195 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 4196 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); 4197 } else { 4198 if (speed_cap == PCIE_SPEED_16_0GT) 4199 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 4200 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 4201 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 | 4202 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4); 4203 else if (speed_cap == PCIE_SPEED_8_0GT) 4204 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 4205 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 4206 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); 4207 else if (speed_cap == PCIE_SPEED_5_0GT) 4208 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 4209 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2); 4210 else 4211 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1; 4212 } 4213 /* platform caps */ 4214 if (platform_speed_cap == PCI_SPEED_UNKNOWN) { 4215 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 4216 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2); 4217 } else { 4218 if (platform_speed_cap == PCIE_SPEED_16_0GT) 4219 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 4220 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | 4221 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 | 4222 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4); 4223 else if (platform_speed_cap == PCIE_SPEED_8_0GT) 4224 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 4225 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | 4226 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3); 4227 else if (platform_speed_cap == PCIE_SPEED_5_0GT) 4228 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 4229 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2); 4230 else 4231 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; 4232 4233 } 4234 } 4235 if (adev->pm.pcie_mlw_mask == 0) { 4236 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) { 4237 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK; 4238 } else { 4239 switch (platform_link_width) { 4240 case PCIE_LNK_X32: 4241 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | 4242 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 4243 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 4244 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 4245 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 4246 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 4247 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 4248 break; 4249 case PCIE_LNK_X16: 4250 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 4251 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 4252 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 4253 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 4254 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 4255 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 4256 break; 4257 case PCIE_LNK_X12: 4258 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 4259 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 4260 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 4261 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 4262 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 4263 break; 4264 case PCIE_LNK_X8: 4265 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 4266 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 4267 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 4268 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 4269 break; 4270 case PCIE_LNK_X4: 4271 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 4272 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 4273 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 4274 break; 4275 case PCIE_LNK_X2: 4276 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 4277 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 4278 break; 4279 case PCIE_LNK_X1: 4280 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; 4281 break; 4282 default: 4283 break; 4284 } 4285 } 4286 } 4287 } 4288 4289