/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");

#define AMDGPU_RESUME_MS	2000

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"LAST",
};

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * amdgpu_device_is_px - Check if the device is a dGPU with HG/PX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with HG/PX power control,
 * otherwise return false.
 */
bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
/**
 * amdgpu_mm_rreg - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

/*
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (offset < adev->rmmio_size)
		return readb(adev->rmmio + offset);
	BUG();
}

/*
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}
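/*
 * Illustrative sketch (not driver code): amdgpu_mm_rreg() above and
 * amdgpu_mm_wreg() below fall back to the classic index/data register pair
 * when the dword offset lies outside the directly mapped MMIO window.
 * Under mmio_idx_lock the sequence is conceptually:
 *
 *	writel(reg * 4, rmmio + mmMM_INDEX * 4);	// select the register
 *	val = readl(rmmio + mmMM_DATA * 4);		// access it indirectly
 *
 * Callers never see the difference; they go through RREG32()/WREG32() and
 * the helpers pick the direct or the indexed path.
 */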
/**
 * amdgpu_mm_wreg - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_wreg(adev, reg, v);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

/**
 * amdgpu_io_rreg - read an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 *
 * Returns the 32 bit value from the offset specified.
 */
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

/**
 * amdgpu_io_wreg - write to an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
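/*
 * Illustrative sketch (not driver code): ring code elsewhere in the driver
 * publishes its hardware write pointer through the WDOORBELL64() macro,
 * which expands to amdgpu_mm_wdoorbell64() below, e.g. conceptually:
 *
 *	if (ring->use_doorbell)
 *		WDOORBELL64(ring->doorbell_index, ring->wptr);
 *
 * The 32-bit variants above serve the older parts whose doorbells are
 * dword sized.
 */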
/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}
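/*
 * Illustrative note: amdgpu_device_init() (not part of this excerpt)
 * installs these stubs as the default register callbacks, e.g.
 *
 *	adev->pcie_rreg = &amdgpu_invalid_rreg;
 *	adev->pcie_wreg = &amdgpu_invalid_wreg;
 *	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
 *
 * so an access through a block a given asic never wired up BUG()s loudly
 * instead of silently touching a bogus offset. Each asic's set_ip_blocks()
 * path overrides the callbacks it actually supports.
 */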
/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
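/*
 * Illustrative sketch: a golden-register table consumed by
 * amdgpu_device_program_register_sequence() is a flat array of
 * {reg, and_mask, or_mask} triples; mmFOO/mmBAR below are hypothetical:
 *
 *	static const u32 golden_settings_example[] = {
 *		mmFOO, 0xffffffff, 0x00000001,	// and_mask of ~0: plain write
 *		mmBAR, 0x0000ff00, 0x00003400,	// clear bits 8-15, then OR
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *						ARRAY_SIZE(golden_settings_example));
 *
 * Real tables live in the per-asic files (vi.c, soc15.c, ...).
 */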
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{

	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	amdgpu_asic_init_doorbell_index(adev);

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     adev->doorbell_index.max_assignment + 1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	/* For Vega, reserve and map two pages on the doorbell BAR since the
	 * SDMA paging queue doorbell uses the second page. The
	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
	 * doorbells are in the first page. So with the paging queue enabled,
	 * the max num_doorbells should be increased by one page (0x400 in
	 * dwords).
	 */
	if (adev->asic_type >= CHIP_VEGA10)
		adev->doorbell.num_doorbells += 0x400;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}



/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
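/*
 * Illustrative sketch: a typical writeback user (the fence code, for
 * instance) grabs a slot, derives the CPU and GPU views from the returned
 * dword offset, and releases it on teardown:
 *
 *	u32 wb;
 *
 *	r = amdgpu_device_wb_get(adev, &wb);
 *	if (r)
 *		return r;
 *	gpu_addr = adev->wb.gpu_addr + wb * 4;	// GPU writes here
 *	cpu_val  = adev->wb.wb[wb];		// CPU reads it back
 *	...
 *	amdgpu_device_wb_free(adev, wb);
 */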
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we
 * abort driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}
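/*
 * Illustrative note: the GMC code calls this helper early in its mc_init
 * stage, before the VRAM aperture is programmed, since both the FB and the
 * doorbell BAR may move:
 *
 *	r = amdgpu_device_resize_fb_bar(adev);
 *	if (r)
 *		return r;	// -ENODEV: BAR unusable, abort probe
 */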
/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if posting is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still needs the driver to do a vPost, otherwise the gpu
		 * hangs. smc fw versions above 22.15 don't have this flaw, so we force
		 * vpost to be executed for smc versions below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory; a page is 4KB so we have 12 bits of offset, a minimum of
 * 9 bits in the page table, and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}
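/*
 * Illustrative arithmetic: with 4 KB pages, a block size of 9 bits means
 * one page-table block covers 2^9 pages:
 *
 *	coverage = (1ULL << 9) * 4096;	// 512 * 4 KB = 2 MB per block
 *
 * which is why amdgpu_device_check_block_size() above rejects anything
 * below 9.
 */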
static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8) ? true : false;
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
	    (amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		   (amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !is_power_of_2(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}

	if (amdgpu_lockup_timeout == 0) {
		dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n");
		amdgpu_lockup_timeout = 10000;
	}

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
}
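/*
 * Illustrative note: invalid module parameters are corrected rather than
 * rejected, so probe never fails on a typo. For example, booting with
 * amdgpu.sched_jobs=6 ends up as:
 *
 *	amdgpu_sched_jobs = roundup_pow_of_two(6);	// -> 8
 */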
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after they are powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}
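/*
 * Illustrative sketch: other components (powerplay, for instance) gate a
 * whole IP type through this single entry point instead of reaching into
 * the IP block array themselves:
 *
 *	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 *					       AMD_CG_STATE_GATE);
 */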
/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;

}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;

}
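/*
 * Illustrative sketch: reset and power-management paths use these per-type
 * queries instead of poking blocks directly, e.g. draining the GMC before
 * reprogramming it:
 *
 *	r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
 *	if (r)
 *		return r;
 */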
/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block version is equal to or greater than the
 * requested @major/@minor version, 1 if it is smaller or the IP block
 * doesn't exist.
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}
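/*
 * Illustrative sketch: IP code keys optional behavior off a minimum
 * version with the comparison above, e.g. "only on SMC 7.1 or newer":
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
 *					       7, 1) == 0) {
 *		// SMC IP block is >= 7.1 on this asic
 *	}
 */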
/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		 ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}
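/*
 * Illustrative usage: the parameter parsed above is a semicolon-separated
 * list of "pci-address,crtc-count" entries, e.g. (hypothetical address):
 *
 *	modprobe amdgpu virtual_display=0000:26:00.0,2
 *
 * enables two virtual CRTCs on that device only, while "all,1" enables one
 * virtual CRTC on every device amdgpu drives.
 */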
/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	case CHIP_VEGA20:
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			chip_name = "raven2";
		else if (adev->pdev->device == 0x15d8)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}
/**
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run.
 * This is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		if (adev->asic_type == CHIP_RAVEN)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	amdgpu_amdkfd_device_probe(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return -EAGAIN;
	}

	adev->pm.pp_feature = amdgpu_pp_feature_mask;
	if (amdgpu_sriov_vf(adev))
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}
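/*
 * Illustrative usage: amdgpu.ip_block_mask lets a developer disable
 * individual IP blocks by index for debugging; the loop above skips any
 * block whose bit is clear, e.g.:
 *
 *	modprobe amdgpu ip_block_mask=0xfffffffd	// disable IP block 1
 *
 * A block disabled this way is marked !valid and ignored by every later
 * init/suspend/fini pass.
 */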
static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
			if (r) {
				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;
		}
	}

	return 0;
}

static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}

static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
{
	int r = 0;
	int i;
	uint32_t smu_version;

	if (adev->asic_type >= CHIP_VEGA10) {
		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
				if (adev->in_gpu_reset || adev->in_suspend) {
					if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset)
						break; /* sriov gpu reset, psp need to do hw_init before IH because of hw limit */
					r = adev->ip_blocks[i].version->funcs->resume(adev);
					if (r) {
						DRM_ERROR("resume of IP block <%s> failed %d\n",
							  adev->ip_blocks[i].version->funcs->name, r);
						return r;
					}
				} else {
					r = adev->ip_blocks[i].version->funcs->hw_init(adev);
					if (r) {
						DRM_ERROR("hw_init of IP block <%s> failed %d\n",
							  adev->ip_blocks[i].version->funcs->name, r);
						return r;
					}
				}
				adev->ip_blocks[i].status.hw = true;
			}
		}
	}
	r = amdgpu_pm_load_smu_firmware(adev, &smu_version);

	return r;
}
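/*
 * Illustrative note: amdgpu_device_ip_init() below drives the hardware
 * bring-up in three ordered steps:
 *
 *	amdgpu_device_ip_hw_init_phase1(adev);	// COMMON + IH blocks first
 *	amdgpu_device_fw_loading(adev);		// PSP/SMU load microcode
 *	amdgpu_device_ip_hw_init_phase2(adev);	// everything else
 *
 * because most engines cannot be started until their firmware is resident,
 * while the common and interrupt blocks must come up before anything that
 * raises interrupts.
 */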
/**
 * amdgpu_device_ip_init - run init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
 * are run. sw_init initializes the software state associated with each IP
 * and hw_init initializes the hardware associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
	int i, r;

	r = amdgpu_ras_init(adev);
	if (r)
		return r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			goto init_failed;
		}
		adev->ip_blocks[i].status.sw = true;

		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_device_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				goto init_failed;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				goto init_failed;
			}
			r = amdgpu_device_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
				goto init_failed;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
							       AMDGPU_GEM_DOMAIN_VRAM,
							       AMDGPU_CSA_SIZE);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					goto init_failed;
				}
			}
		}
	}

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
		goto init_failed;
	}

	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase1(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_fw_loading(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase2(adev);
	if (r)
		goto init_failed;

	if (adev->gmc.xgmi.num_physical_nodes > 1)
		amdgpu_xgmi_add_device(adev);
	amdgpu_amdkfd_device_init(adev);

init_failed:
	if (amdgpu_sriov_vf(adev)) {
		if (!r)
			amdgpu_virt_init_data_exchange(adev);
		amdgpu_virt_release_full_gpu(adev, true);
	}

	return r;
}

/**
 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
 *
 * @adev: amdgpu_device pointer
 *
 * Takes a snapshot of the GART table contents in VRAM (the reset magic).
 * The driver calls this function before a GPU reset. If the value is
 * retained after a GPU reset, VRAM has not been lost. Some GPU resets may
 * destroy VRAM contents.
 */
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

/**
 * amdgpu_device_check_vram_lost - check if vram is valid
 *
 * @adev: amdgpu_device pointer
 *
 * Checks the reset magic value written to the gart pointer in VRAM.
 * The driver calls this after a GPU reset to see if the contents of
 * VRAM are lost or not.
 * Returns true if vram is lost, false if not.
 */
static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}
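/*
 * Illustrative sketch: the GPU reset path pairs the two helpers above
 * around the actual reset:
 *
 *	amdgpu_device_fill_reset_magic(adev);		// snapshot before
 *	...reset the asic...
 *	vram_lost = amdgpu_device_check_vram_lost(adev);	// compare after
 *
 * If the snapshot no longer matches, VRAM content was lost and buffers
 * must be restored.
 */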
/**
 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
 *
 * @adev: amdgpu_device pointer
 * @state: clockgating state (gate or ungate)
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * set_clockgating_state callbacks are run.
 * At late init, this enables clockgating for the hardware IPs; at fini or
 * suspend, it disables clockgating again.
 * Returns 0 on success, negative error code on failure.
 */

static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
				      enum amd_clockgating_state state)
{
	int i, j, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (j = 0; j < adev->num_ip_blocks; j++) {
		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     state);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
{
	int i, j, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (j = 0; j < adev->num_ip_blocks; j++) {
		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		/* skip PG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
			/* enable powergating to save power */
			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
										     state);
			if (r) {
				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}
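/*
 * Illustrative note: the gating direction selects the walk order in the
 * two helpers above: gating walks the IP list front to back, ungating back
 * to front, mirroring init vs. fini ordering:
 *
 *	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);	// late init
 *	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);	// fini/suspend
 */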
/**
 * amdgpu_device_ip_late_init - run late init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Late initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the late_init callbacks are run.
 * late_init covers any special initialization that an IP requires
 * after all of the IPs have been initialized or something that needs to
 * happen late in the init process.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
		adev->ip_blocks[i].status.late_initialized = true;
	}

	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);

	queue_delayed_work(system_wq, &adev->late_init_work,
			   msecs_to_jiffies(AMDGPU_RESUME_MS));

	amdgpu_device_fill_reset_magic(adev);

	return 0;
}

/**
 * amdgpu_device_ip_fini - run fini for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main teardown pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
 * are run. hw_fini tears down the hardware associated with each IP
 * and sw_fini tears down any software state associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_ras_pre_fini(adev);

	if (adev->gmc.xgmi.num_physical_nodes > 1)
		amdgpu_xgmi_remove_device(adev);

	amdgpu_amdkfd_device_fini(adev);

	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);

	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}


	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_ucode_free_bo(adev);
			amdgpu_free_static_csa(&adev->virt.csa_obj);
			amdgpu_device_wb_fini(adev);
			amdgpu_device_vram_scratch_fini(adev);
			amdgpu_ib_pool_fini(adev);
		}

		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	amdgpu_ras_fini(adev);

	if (amdgpu_sriov_vf(adev))
		if (amdgpu_virt_release_full_gpu(adev, false))
			DRM_ERROR("failed to release exclusive mode on fini\n");

	return 0;
}
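/*
 * Illustrative note on the teardown order enforced above: the SMC block is
 * brought down first, the remaining blocks follow in reverse init order,
 * and the GMC-owned allocations (ucode bo, CSA, writeback, VRAM scratch,
 * IB pool) are released immediately before the GMC's own sw_fini so that
 * nothing still references VRAM when the memory controller goes away.
 */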
adev->ip_blocks[i].version->funcs->late_fini((void *)adev); 1948 adev->ip_blocks[i].status.late_initialized = false; 1949 } 1950 1951 amdgpu_ras_fini(adev); 1952 1953 if (amdgpu_sriov_vf(adev)) 1954 if (amdgpu_virt_release_full_gpu(adev, false)) 1955 DRM_ERROR("failed to release exclusive mode on fini\n"); 1956 1957 return 0; 1958 } 1959 1960 static int amdgpu_device_enable_mgpu_fan_boost(void) 1961 { 1962 struct amdgpu_gpu_instance *gpu_ins; 1963 struct amdgpu_device *adev; 1964 int i, ret = 0; 1965 1966 mutex_lock(&mgpu_info.mutex); 1967 1968 /* 1969 * MGPU fan boost feature should be enabled 1970 * only when there are two or more dGPUs in 1971 * the system 1972 */ 1973 if (mgpu_info.num_dgpu < 2) 1974 goto out; 1975 1976 for (i = 0; i < mgpu_info.num_dgpu; i++) { 1977 gpu_ins = &(mgpu_info.gpu_ins[i]); 1978 adev = gpu_ins->adev; 1979 if (!(adev->flags & AMD_IS_APU) && 1980 !gpu_ins->mgpu_fan_enabled && 1981 adev->powerplay.pp_funcs && 1982 adev->powerplay.pp_funcs->enable_mgpu_fan_boost) { 1983 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev); 1984 if (ret) 1985 break; 1986 1987 gpu_ins->mgpu_fan_enabled = 1; 1988 } 1989 } 1990 1991 out: 1992 mutex_unlock(&mgpu_info.mutex); 1993 1994 return ret; 1995 } 1996 1997 /** 1998 * amdgpu_device_ip_late_init_func_handler - work handler for ib test 1999 * 2000 * @work: work_struct. 2001 */ 2002 static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work) 2003 { 2004 struct amdgpu_device *adev = 2005 container_of(work, struct amdgpu_device, late_init_work.work); 2006 int r; 2007 2008 r = amdgpu_ib_ring_tests(adev); 2009 if (r) 2010 DRM_ERROR("ib ring test failed (%d).\n", r); 2011 2012 r = amdgpu_device_enable_mgpu_fan_boost(); 2013 if (r) 2014 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r); 2015 2016 /*set to low pstate by default */ 2017 amdgpu_xgmi_set_pstate(adev, 0); 2018 2019 } 2020 2021 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) 2022 { 2023 struct amdgpu_device *adev = 2024 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work); 2025 2026 mutex_lock(&adev->gfx.gfx_off_mutex); 2027 if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) { 2028 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true)) 2029 adev->gfx.gfx_off_state = true; 2030 } 2031 mutex_unlock(&adev->gfx.gfx_off_mutex); 2032 } 2033 2034 /** 2035 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1) 2036 * 2037 * @adev: amdgpu_device pointer 2038 * 2039 * Main suspend function for hardware IPs. The list of all the hardware 2040 * IPs that make up the asic is walked, clockgating is disabled and the 2041 * suspend callbacks are run. suspend puts the hardware and software state 2042 * in each IP into a state suitable for suspend. 2043 * Returns 0 on success, negative error code on failure. 
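 *
 * Note: phase 1 only touches the display (DCE) blocks; all other IPs are
 * deferred to amdgpu_device_ip_suspend_phase2() so that VRAM can be
 * evicted in between the two phases (see amdgpu_device_suspend()).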
 */
static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* displays are handled separately */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
			/* XXX handle errors */
			r = adev->ip_blocks[i].version->funcs->suspend(adev);
			if (r) {
				DRM_ERROR("suspend of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
 *
 * @adev: amdgpu_device pointer
 *
 * Second suspend pass for hardware IPs. The list of all the hardware IPs
 * that make up the asic is walked and the suspend callbacks are run for
 * every block except the displays, which were already suspended in phase 1.
 * suspend puts the hardware and software state in each IP into a state
 * suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* displays are handled in phase1 */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
			continue;
		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_suspend - run suspend for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs. Walks the list of all the
 * hardware IPs that make up the asic by running suspend phase 1 (displays)
 * followed by phase 2 (everything else), putting the hardware and software
 * state in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
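 *
 * A minimal sketch of the sequence performed below (the SR-IOV
 * request/release pair is skipped on bare metal):
 *
 *   amdgpu_virt_request_full_gpu(adev, false);
 *   r = amdgpu_device_ip_suspend_phase1(adev);  /* displays */
 *   r = amdgpu_device_ip_suspend_phase2(adev);  /* everything else */
 *   amdgpu_virt_release_full_gpu(adev, false);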
 */
int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	r = amdgpu_device_ip_suspend_phase1(adev);
	if (r)
		return r;
	r = amdgpu_device_ip_suspend_phase2(adev);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return r;
}

static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_PSP,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT-early: %s %s\n",
				 block->version->funcs->name, r ? "failed" : "succeeded");
			if (r)
				return r;
		}
	}

	return 0;
}

static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_UVD,
		AMD_IP_BLOCK_TYPE_VCE
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT-late: %s %s\n",
				 block->version->funcs->name, r ? "failed" : "succeeded");
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * First resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * COMMON, GMC, and IH. resume puts the hardware into a functional state
 * after a suspend and updates the software state as necessary. This
 * function is also used for restoring the GPU after a GPU reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Second resume function for hardware IPs.
 * The list of all the hardware IPs that make up the asic is walked and the
 * resume callbacks are run for all blocks except COMMON, GMC, and IH.
 * resume puts the hardware into a functional state after a suspend and
 * updates the software state as necessary. This function is also used for
 * restoring the GPU after a GPU reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main resume function for hardware IPs. The hardware IPs are split into
 * two resume functions because they are also used in recovering from a GPU
 * reset, where some additional steps need to be taken between them. In
 * this case (S3/S4) they are run sequentially.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_device_ip_resume_phase1(adev);
	if (r)
		return r;

	r = amdgpu_device_fw_loading(adev);
	if (r)
		return r;

	r = amdgpu_device_ip_resume_phase2(adev);

	return r;
}

/**
 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Query the VBIOS data tables to determine if the board supports SR-IOV.
 */
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		if (adev->is_atom_fw) {
			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		} else {
			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		}

		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
	}
}

/**
 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
 *
 * @asic_type: AMD asic type
 *
 * Check if there is DC (new modesetting infrastructure) support for an asic.
 * Returns true if DC has support, false if not.
 */
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
	switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
	case CHIP_BONAIRE:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		/*
		 * We have systems in the wild with these ASICs that require
		 * LVDS and VGA support which is not supported with DC.
		 *
		 * Fallback to the non-DC driver here by default so as not to
		 * cause regressions.
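		 *
		 * amdgpu_dc is the dc module parameter (default -1/auto), so
		 * the check below only selects DC for these ASICs when the
		 * user explicitly sets amdgpu.dc=1.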
2342 */ 2343 return amdgpu_dc > 0; 2344 case CHIP_HAWAII: 2345 case CHIP_CARRIZO: 2346 case CHIP_STONEY: 2347 case CHIP_POLARIS10: 2348 case CHIP_POLARIS11: 2349 case CHIP_POLARIS12: 2350 case CHIP_VEGAM: 2351 case CHIP_TONGA: 2352 case CHIP_FIJI: 2353 case CHIP_VEGA10: 2354 case CHIP_VEGA12: 2355 case CHIP_VEGA20: 2356 #if defined(CONFIG_DRM_AMD_DC_DCN1_0) 2357 case CHIP_RAVEN: 2358 #endif 2359 return amdgpu_dc != 0; 2360 #endif 2361 default: 2362 return false; 2363 } 2364 } 2365 2366 /** 2367 * amdgpu_device_has_dc_support - check if dc is supported 2368 * 2369 * @adev: amdgpu_device_pointer 2370 * 2371 * Returns true for supported, false for not supported 2372 */ 2373 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) 2374 { 2375 if (amdgpu_sriov_vf(adev)) 2376 return false; 2377 2378 return amdgpu_device_asic_has_dc_support(adev->asic_type); 2379 } 2380 2381 2382 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) 2383 { 2384 struct amdgpu_device *adev = 2385 container_of(__work, struct amdgpu_device, xgmi_reset_work); 2386 2387 adev->asic_reset_res = amdgpu_asic_reset(adev); 2388 if (adev->asic_reset_res) 2389 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s", 2390 adev->asic_reset_res, adev->ddev->unique); 2391 } 2392 2393 2394 /** 2395 * amdgpu_device_init - initialize the driver 2396 * 2397 * @adev: amdgpu_device pointer 2398 * @ddev: drm dev pointer 2399 * @pdev: pci dev pointer 2400 * @flags: driver flags 2401 * 2402 * Initializes the driver info and hw (all asics). 2403 * Returns 0 for success or an error on failure. 2404 * Called at driver startup. 2405 */ 2406 int amdgpu_device_init(struct amdgpu_device *adev, 2407 struct drm_device *ddev, 2408 struct pci_dev *pdev, 2409 uint32_t flags) 2410 { 2411 int r, i; 2412 bool runtime = false; 2413 u32 max_MBps; 2414 2415 adev->shutdown = false; 2416 adev->dev = &pdev->dev; 2417 adev->ddev = ddev; 2418 adev->pdev = pdev; 2419 adev->flags = flags; 2420 adev->asic_type = flags & AMD_ASIC_MASK; 2421 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; 2422 if (amdgpu_emu_mode == 1) 2423 adev->usec_timeout *= 2; 2424 adev->gmc.gart_size = 512 * 1024 * 1024; 2425 adev->accel_working = false; 2426 adev->num_rings = 0; 2427 adev->mman.buffer_funcs = NULL; 2428 adev->mman.buffer_funcs_ring = NULL; 2429 adev->vm_manager.vm_pte_funcs = NULL; 2430 adev->vm_manager.vm_pte_num_rqs = 0; 2431 adev->gmc.gmc_funcs = NULL; 2432 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); 2433 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 2434 2435 adev->smc_rreg = &amdgpu_invalid_rreg; 2436 adev->smc_wreg = &amdgpu_invalid_wreg; 2437 adev->pcie_rreg = &amdgpu_invalid_rreg; 2438 adev->pcie_wreg = &amdgpu_invalid_wreg; 2439 adev->pciep_rreg = &amdgpu_invalid_rreg; 2440 adev->pciep_wreg = &amdgpu_invalid_wreg; 2441 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; 2442 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; 2443 adev->didt_rreg = &amdgpu_invalid_rreg; 2444 adev->didt_wreg = &amdgpu_invalid_wreg; 2445 adev->gc_cac_rreg = &amdgpu_invalid_rreg; 2446 adev->gc_cac_wreg = &amdgpu_invalid_wreg; 2447 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; 2448 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; 2449 2450 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 2451 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, 2452 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 2453 2454 /* mutex initialization are all done here so we 2455 * can 
recall function without having locking issues */ 2456 atomic_set(&adev->irq.ih.lock, 0); 2457 mutex_init(&adev->firmware.mutex); 2458 mutex_init(&adev->pm.mutex); 2459 mutex_init(&adev->gfx.gpu_clock_mutex); 2460 mutex_init(&adev->srbm_mutex); 2461 mutex_init(&adev->gfx.pipe_reserve_mutex); 2462 mutex_init(&adev->gfx.gfx_off_mutex); 2463 mutex_init(&adev->grbm_idx_mutex); 2464 mutex_init(&adev->mn_lock); 2465 mutex_init(&adev->virt.vf_errors.lock); 2466 hash_init(adev->mn_hash); 2467 mutex_init(&adev->lock_reset); 2468 mutex_init(&adev->virt.dpm_mutex); 2469 2470 amdgpu_device_check_arguments(adev); 2471 2472 spin_lock_init(&adev->mmio_idx_lock); 2473 spin_lock_init(&adev->smc_idx_lock); 2474 spin_lock_init(&adev->pcie_idx_lock); 2475 spin_lock_init(&adev->uvd_ctx_idx_lock); 2476 spin_lock_init(&adev->didt_idx_lock); 2477 spin_lock_init(&adev->gc_cac_idx_lock); 2478 spin_lock_init(&adev->se_cac_idx_lock); 2479 spin_lock_init(&adev->audio_endpt_idx_lock); 2480 spin_lock_init(&adev->mm_stats.lock); 2481 2482 INIT_LIST_HEAD(&adev->shadow_list); 2483 mutex_init(&adev->shadow_list_lock); 2484 2485 INIT_LIST_HEAD(&adev->ring_lru_list); 2486 spin_lock_init(&adev->ring_lru_list_lock); 2487 2488 INIT_DELAYED_WORK(&adev->late_init_work, 2489 amdgpu_device_ip_late_init_func_handler); 2490 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, 2491 amdgpu_device_delay_enable_gfx_off); 2492 2493 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); 2494 2495 adev->gfx.gfx_off_req_count = 1; 2496 adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false; 2497 2498 /* Registers mapping */ 2499 /* TODO: block userspace mapping of io register */ 2500 if (adev->asic_type >= CHIP_BONAIRE) { 2501 adev->rmmio_base = pci_resource_start(adev->pdev, 5); 2502 adev->rmmio_size = pci_resource_len(adev->pdev, 5); 2503 } else { 2504 adev->rmmio_base = pci_resource_start(adev->pdev, 2); 2505 adev->rmmio_size = pci_resource_len(adev->pdev, 2); 2506 } 2507 2508 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); 2509 if (adev->rmmio == NULL) { 2510 return -ENOMEM; 2511 } 2512 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); 2513 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); 2514 2515 /* io port mapping */ 2516 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 2517 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) { 2518 adev->rio_mem_size = pci_resource_len(adev->pdev, i); 2519 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size); 2520 break; 2521 } 2522 } 2523 if (adev->rio_mem == NULL) 2524 DRM_INFO("PCI I/O BAR is not found.\n"); 2525 2526 amdgpu_device_get_pcie_info(adev); 2527 2528 /* early init functions */ 2529 r = amdgpu_device_ip_early_init(adev); 2530 if (r) 2531 return r; 2532 2533 /* doorbell bar mapping and doorbell index init*/ 2534 amdgpu_device_doorbell_init(adev); 2535 2536 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ 2537 /* this will fail for cards that aren't VGA class devices, just 2538 * ignore it */ 2539 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode); 2540 2541 if (amdgpu_device_is_px(ddev)) 2542 runtime = true; 2543 if (!pci_is_thunderbolt_attached(adev->pdev)) 2544 vga_switcheroo_register_client(adev->pdev, 2545 &amdgpu_switcheroo_ops, runtime); 2546 if (runtime) 2547 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); 2548 2549 if (amdgpu_emu_mode == 1) { 2550 /* post the asic on emulation mode */ 2551 emu_soc_asic_init(adev); 2552 goto 
fence_driver_init; 2553 } 2554 2555 /* Read BIOS */ 2556 if (!amdgpu_get_bios(adev)) { 2557 r = -EINVAL; 2558 goto failed; 2559 } 2560 2561 r = amdgpu_atombios_init(adev); 2562 if (r) { 2563 dev_err(adev->dev, "amdgpu_atombios_init failed\n"); 2564 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); 2565 goto failed; 2566 } 2567 2568 /* detect if we are with an SRIOV vbios */ 2569 amdgpu_device_detect_sriov_bios(adev); 2570 2571 /* check if we need to reset the asic 2572 * E.g., driver was not cleanly unloaded previously, etc. 2573 */ 2574 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) { 2575 r = amdgpu_asic_reset(adev); 2576 if (r) { 2577 dev_err(adev->dev, "asic reset on init failed\n"); 2578 goto failed; 2579 } 2580 } 2581 2582 /* Post card if necessary */ 2583 if (amdgpu_device_need_post(adev)) { 2584 if (!adev->bios) { 2585 dev_err(adev->dev, "no vBIOS found\n"); 2586 r = -EINVAL; 2587 goto failed; 2588 } 2589 DRM_INFO("GPU posting now...\n"); 2590 r = amdgpu_atom_asic_init(adev->mode_info.atom_context); 2591 if (r) { 2592 dev_err(adev->dev, "gpu post error!\n"); 2593 goto failed; 2594 } 2595 } 2596 2597 if (adev->is_atom_fw) { 2598 /* Initialize clocks */ 2599 r = amdgpu_atomfirmware_get_clock_info(adev); 2600 if (r) { 2601 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); 2602 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 2603 goto failed; 2604 } 2605 } else { 2606 /* Initialize clocks */ 2607 r = amdgpu_atombios_get_clock_info(adev); 2608 if (r) { 2609 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); 2610 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 2611 goto failed; 2612 } 2613 /* init i2c buses */ 2614 if (!amdgpu_device_has_dc_support(adev)) 2615 amdgpu_atombios_i2c_init(adev); 2616 } 2617 2618 fence_driver_init: 2619 /* Fence driver */ 2620 r = amdgpu_fence_driver_init(adev); 2621 if (r) { 2622 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n"); 2623 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); 2624 goto failed; 2625 } 2626 2627 /* init the mode config */ 2628 drm_mode_config_init(adev->ddev); 2629 2630 r = amdgpu_device_ip_init(adev); 2631 if (r) { 2632 /* failed in exclusive mode due to timeout */ 2633 if (amdgpu_sriov_vf(adev) && 2634 !amdgpu_sriov_runtime(adev) && 2635 amdgpu_virt_mmio_blocked(adev) && 2636 !amdgpu_virt_wait_reset(adev)) { 2637 dev_err(adev->dev, "VF exclusive mode timeout\n"); 2638 /* Don't send request since VF is inactive. */ 2639 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; 2640 adev->virt.ops = NULL; 2641 r = -EAGAIN; 2642 goto failed; 2643 } 2644 dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); 2645 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); 2646 if (amdgpu_virt_request_full_gpu(adev, false)) 2647 amdgpu_virt_release_full_gpu(adev, false); 2648 goto failed; 2649 } 2650 2651 adev->accel_working = true; 2652 2653 amdgpu_vm_check_compute_bug(adev); 2654 2655 /* Initialize the buffer migration limit. */ 2656 if (amdgpu_moverate >= 0) 2657 max_MBps = amdgpu_moverate; 2658 else 2659 max_MBps = 8; /* Allow 8 MB/s. */ 2660 /* Get a log2 for easy divisions. 
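 * e.g. max_MBps = 8 gives log2_max_MBps = 3, letting the migration
 * throttling code turn divisions by the rate into cheap shifts.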
*/ 2661 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); 2662 2663 amdgpu_fbdev_init(adev); 2664 2665 r = amdgpu_pm_sysfs_init(adev); 2666 if (r) 2667 DRM_ERROR("registering pm debugfs failed (%d).\n", r); 2668 2669 r = amdgpu_debugfs_gem_init(adev); 2670 if (r) 2671 DRM_ERROR("registering gem debugfs failed (%d).\n", r); 2672 2673 r = amdgpu_debugfs_regs_init(adev); 2674 if (r) 2675 DRM_ERROR("registering register debugfs failed (%d).\n", r); 2676 2677 r = amdgpu_debugfs_firmware_init(adev); 2678 if (r) 2679 DRM_ERROR("registering firmware debugfs failed (%d).\n", r); 2680 2681 r = amdgpu_debugfs_init(adev); 2682 if (r) 2683 DRM_ERROR("Creating debugfs files failed (%d).\n", r); 2684 2685 if ((amdgpu_testing & 1)) { 2686 if (adev->accel_working) 2687 amdgpu_test_moves(adev); 2688 else 2689 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n"); 2690 } 2691 if (amdgpu_benchmarking) { 2692 if (adev->accel_working) 2693 amdgpu_benchmark(adev, amdgpu_benchmarking); 2694 else 2695 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n"); 2696 } 2697 2698 /* enable clockgating, etc. after ib tests, etc. since some blocks require 2699 * explicit gating rather than handling it automatically. 2700 */ 2701 r = amdgpu_device_ip_late_init(adev); 2702 if (r) { 2703 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n"); 2704 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); 2705 goto failed; 2706 } 2707 2708 /* must succeed. */ 2709 amdgpu_ras_post_init(adev); 2710 2711 return 0; 2712 2713 failed: 2714 amdgpu_vf_error_trans_all(adev); 2715 if (runtime) 2716 vga_switcheroo_fini_domain_pm_ops(adev->dev); 2717 2718 return r; 2719 } 2720 2721 /** 2722 * amdgpu_device_fini - tear down the driver 2723 * 2724 * @adev: amdgpu_device pointer 2725 * 2726 * Tear down the driver info (all asics). 2727 * Called at driver shutdown. 2728 */ 2729 void amdgpu_device_fini(struct amdgpu_device *adev) 2730 { 2731 int r; 2732 2733 DRM_INFO("amdgpu: finishing device.\n"); 2734 adev->shutdown = true; 2735 /* disable all interrupts */ 2736 amdgpu_irq_disable_all(adev); 2737 if (adev->mode_info.mode_config_initialized){ 2738 if (!amdgpu_device_has_dc_support(adev)) 2739 drm_helper_force_disable_all(adev->ddev); 2740 else 2741 drm_atomic_helper_shutdown(adev->ddev); 2742 } 2743 amdgpu_fence_driver_fini(adev); 2744 amdgpu_pm_sysfs_fini(adev); 2745 amdgpu_fbdev_fini(adev); 2746 r = amdgpu_device_ip_fini(adev); 2747 if (adev->firmware.gpu_info_fw) { 2748 release_firmware(adev->firmware.gpu_info_fw); 2749 adev->firmware.gpu_info_fw = NULL; 2750 } 2751 adev->accel_working = false; 2752 cancel_delayed_work_sync(&adev->late_init_work); 2753 /* free i2c buses */ 2754 if (!amdgpu_device_has_dc_support(adev)) 2755 amdgpu_i2c_fini(adev); 2756 2757 if (amdgpu_emu_mode != 1) 2758 amdgpu_atombios_fini(adev); 2759 2760 kfree(adev->bios); 2761 adev->bios = NULL; 2762 if (!pci_is_thunderbolt_attached(adev->pdev)) 2763 vga_switcheroo_unregister_client(adev->pdev); 2764 if (adev->flags & AMD_IS_PX) 2765 vga_switcheroo_fini_domain_pm_ops(adev->dev); 2766 vga_client_register(adev->pdev, NULL, NULL, NULL); 2767 if (adev->rio_mem) 2768 pci_iounmap(adev->pdev, adev->rio_mem); 2769 adev->rio_mem = NULL; 2770 iounmap(adev->rmmio); 2771 adev->rmmio = NULL; 2772 amdgpu_device_doorbell_fini(adev); 2773 amdgpu_debugfs_regs_cleanup(adev); 2774 } 2775 2776 2777 /* 2778 * Suspend & resume. 
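 *
 * Rough flow (descriptive only): amdgpu_device_suspend() unpins
 * framebuffers and cursors, suspends the displays (phase 1), evicts VRAM,
 * suspends the fence driver, suspends the remaining IPs (phase 2), then
 * evicts VRAM once more to flush the GART page table before the PCI
 * device is powered down; amdgpu_device_resume() mirrors the sequence.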
2779 */ 2780 /** 2781 * amdgpu_device_suspend - initiate device suspend 2782 * 2783 * @dev: drm dev pointer 2784 * @suspend: suspend state 2785 * @fbcon : notify the fbdev of suspend 2786 * 2787 * Puts the hw in the suspend state (all asics). 2788 * Returns 0 for success or an error on failure. 2789 * Called at driver suspend. 2790 */ 2791 int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) 2792 { 2793 struct amdgpu_device *adev; 2794 struct drm_crtc *crtc; 2795 struct drm_connector *connector; 2796 int r; 2797 2798 if (dev == NULL || dev->dev_private == NULL) { 2799 return -ENODEV; 2800 } 2801 2802 adev = dev->dev_private; 2803 2804 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 2805 return 0; 2806 2807 adev->in_suspend = true; 2808 drm_kms_helper_poll_disable(dev); 2809 2810 if (fbcon) 2811 amdgpu_fbdev_set_suspend(adev, 1); 2812 2813 cancel_delayed_work_sync(&adev->late_init_work); 2814 2815 if (!amdgpu_device_has_dc_support(adev)) { 2816 /* turn off display hw */ 2817 drm_modeset_lock_all(dev); 2818 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 2819 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 2820 } 2821 drm_modeset_unlock_all(dev); 2822 /* unpin the front buffers and cursors */ 2823 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2824 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2825 struct drm_framebuffer *fb = crtc->primary->fb; 2826 struct amdgpu_bo *robj; 2827 2828 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { 2829 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2830 r = amdgpu_bo_reserve(aobj, true); 2831 if (r == 0) { 2832 amdgpu_bo_unpin(aobj); 2833 amdgpu_bo_unreserve(aobj); 2834 } 2835 } 2836 2837 if (fb == NULL || fb->obj[0] == NULL) { 2838 continue; 2839 } 2840 robj = gem_to_amdgpu_bo(fb->obj[0]); 2841 /* don't unpin kernel fb objects */ 2842 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { 2843 r = amdgpu_bo_reserve(robj, true); 2844 if (r == 0) { 2845 amdgpu_bo_unpin(robj); 2846 amdgpu_bo_unreserve(robj); 2847 } 2848 } 2849 } 2850 } 2851 2852 amdgpu_amdkfd_suspend(adev); 2853 2854 r = amdgpu_device_ip_suspend_phase1(adev); 2855 2856 /* evict vram memory */ 2857 amdgpu_bo_evict_vram(adev); 2858 2859 amdgpu_fence_driver_suspend(adev); 2860 2861 r = amdgpu_device_ip_suspend_phase2(adev); 2862 2863 /* evict remaining vram memory 2864 * This second call to evict vram is to evict the gart page table 2865 * using the CPU. 2866 */ 2867 amdgpu_bo_evict_vram(adev); 2868 2869 pci_save_state(dev->pdev); 2870 if (suspend) { 2871 /* Shut down the device */ 2872 pci_disable_device(dev->pdev); 2873 pci_set_power_state(dev->pdev, PCI_D3hot); 2874 } else { 2875 r = amdgpu_asic_reset(adev); 2876 if (r) 2877 DRM_ERROR("amdgpu asic reset failed\n"); 2878 } 2879 2880 return 0; 2881 } 2882 2883 /** 2884 * amdgpu_device_resume - initiate device resume 2885 * 2886 * @dev: drm dev pointer 2887 * @resume: resume state 2888 * @fbcon : notify the fbdev of resume 2889 * 2890 * Bring the hw back to operating state (all asics). 2891 * Returns 0 for success or an error on failure. 2892 * Called at driver resume. 
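 *
 * A sketch of the ordering implemented below:
 *
 *   pci D0 + restore state -> post card (if needed) ->
 *   amdgpu_device_ip_resume() -> fence resume -> ip_late_init() ->
 *   re-pin cursors -> kfd resume -> flush IB tests -> restore fbcon/mode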
2893 */ 2894 int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) 2895 { 2896 struct drm_connector *connector; 2897 struct amdgpu_device *adev = dev->dev_private; 2898 struct drm_crtc *crtc; 2899 int r = 0; 2900 2901 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 2902 return 0; 2903 2904 if (resume) { 2905 pci_set_power_state(dev->pdev, PCI_D0); 2906 pci_restore_state(dev->pdev); 2907 r = pci_enable_device(dev->pdev); 2908 if (r) 2909 return r; 2910 } 2911 2912 /* post card */ 2913 if (amdgpu_device_need_post(adev)) { 2914 r = amdgpu_atom_asic_init(adev->mode_info.atom_context); 2915 if (r) 2916 DRM_ERROR("amdgpu asic init failed\n"); 2917 } 2918 2919 r = amdgpu_device_ip_resume(adev); 2920 if (r) { 2921 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r); 2922 return r; 2923 } 2924 amdgpu_fence_driver_resume(adev); 2925 2926 2927 r = amdgpu_device_ip_late_init(adev); 2928 if (r) 2929 return r; 2930 2931 if (!amdgpu_device_has_dc_support(adev)) { 2932 /* pin cursors */ 2933 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2934 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2935 2936 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { 2937 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2938 r = amdgpu_bo_reserve(aobj, true); 2939 if (r == 0) { 2940 r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); 2941 if (r != 0) 2942 DRM_ERROR("Failed to pin cursor BO (%d)\n", r); 2943 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); 2944 amdgpu_bo_unreserve(aobj); 2945 } 2946 } 2947 } 2948 } 2949 r = amdgpu_amdkfd_resume(adev); 2950 if (r) 2951 return r; 2952 2953 /* Make sure IB tests flushed */ 2954 flush_delayed_work(&adev->late_init_work); 2955 2956 /* blat the mode back in */ 2957 if (fbcon) { 2958 if (!amdgpu_device_has_dc_support(adev)) { 2959 /* pre DCE11 */ 2960 drm_helper_resume_force_mode(dev); 2961 2962 /* turn on display hw */ 2963 drm_modeset_lock_all(dev); 2964 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 2965 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 2966 } 2967 drm_modeset_unlock_all(dev); 2968 } 2969 amdgpu_fbdev_set_suspend(adev, 0); 2970 } 2971 2972 drm_kms_helper_poll_enable(dev); 2973 2974 /* 2975 * Most of the connector probing functions try to acquire runtime pm 2976 * refs to ensure that the GPU is powered on when connector polling is 2977 * performed. Since we're calling this from a runtime PM callback, 2978 * trying to acquire rpm refs will cause us to deadlock. 2979 * 2980 * Since we're guaranteed to be holding the rpm lock, it's safe to 2981 * temporarily disable the rpm helpers so this doesn't deadlock us. 2982 */ 2983 #ifdef CONFIG_PM 2984 dev->dev->power.disable_depth++; 2985 #endif 2986 if (!amdgpu_device_has_dc_support(adev)) 2987 drm_helper_hpd_irq_event(dev); 2988 else 2989 drm_kms_helper_hotplug_event(dev); 2990 #ifdef CONFIG_PM 2991 dev->dev->power.disable_depth--; 2992 #endif 2993 adev->in_suspend = false; 2994 2995 return 0; 2996 } 2997 2998 /** 2999 * amdgpu_device_ip_check_soft_reset - did soft reset succeed 3000 * 3001 * @adev: amdgpu_device pointer 3002 * 3003 * The list of all the hardware IPs that make up the asic is walked and 3004 * the check_soft_reset callbacks are run. check_soft_reset determines 3005 * if the asic is still hung or not. 3006 * Returns true if any of the IPs are still in a hung state, false if not. 
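 *
 * The per-IP status.hang flags recorded here drive the recovery path,
 * roughly (a sketch of amdgpu_device_pre_asic_reset() below):
 *
 *   if (!amdgpu_device_ip_need_full_reset(adev)) {
 *           amdgpu_device_ip_pre_soft_reset(adev);
 *           amdgpu_device_ip_soft_reset(adev);
 *           amdgpu_device_ip_post_soft_reset(adev);
 *   }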
3007 */ 3008 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) 3009 { 3010 int i; 3011 bool asic_hang = false; 3012 3013 if (amdgpu_sriov_vf(adev)) 3014 return true; 3015 3016 if (amdgpu_asic_need_full_reset(adev)) 3017 return true; 3018 3019 for (i = 0; i < adev->num_ip_blocks; i++) { 3020 if (!adev->ip_blocks[i].status.valid) 3021 continue; 3022 if (adev->ip_blocks[i].version->funcs->check_soft_reset) 3023 adev->ip_blocks[i].status.hang = 3024 adev->ip_blocks[i].version->funcs->check_soft_reset(adev); 3025 if (adev->ip_blocks[i].status.hang) { 3026 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); 3027 asic_hang = true; 3028 } 3029 } 3030 return asic_hang; 3031 } 3032 3033 /** 3034 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset 3035 * 3036 * @adev: amdgpu_device pointer 3037 * 3038 * The list of all the hardware IPs that make up the asic is walked and the 3039 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset 3040 * handles any IP specific hardware or software state changes that are 3041 * necessary for a soft reset to succeed. 3042 * Returns 0 on success, negative error code on failure. 3043 */ 3044 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev) 3045 { 3046 int i, r = 0; 3047 3048 for (i = 0; i < adev->num_ip_blocks; i++) { 3049 if (!adev->ip_blocks[i].status.valid) 3050 continue; 3051 if (adev->ip_blocks[i].status.hang && 3052 adev->ip_blocks[i].version->funcs->pre_soft_reset) { 3053 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); 3054 if (r) 3055 return r; 3056 } 3057 } 3058 3059 return 0; 3060 } 3061 3062 /** 3063 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed 3064 * 3065 * @adev: amdgpu_device pointer 3066 * 3067 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu 3068 * reset is necessary to recover. 3069 * Returns true if a full asic reset is required, false if not. 3070 */ 3071 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev) 3072 { 3073 int i; 3074 3075 if (amdgpu_asic_need_full_reset(adev)) 3076 return true; 3077 3078 for (i = 0; i < adev->num_ip_blocks; i++) { 3079 if (!adev->ip_blocks[i].status.valid) 3080 continue; 3081 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || 3082 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || 3083 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || 3084 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || 3085 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 3086 if (adev->ip_blocks[i].status.hang) { 3087 DRM_INFO("Some block need full reset!\n"); 3088 return true; 3089 } 3090 } 3091 } 3092 return false; 3093 } 3094 3095 /** 3096 * amdgpu_device_ip_soft_reset - do a soft reset 3097 * 3098 * @adev: amdgpu_device pointer 3099 * 3100 * The list of all the hardware IPs that make up the asic is walked and the 3101 * soft_reset callbacks are run if the block is hung. soft_reset handles any 3102 * IP specific hardware or software state changes that are necessary to soft 3103 * reset the IP. 3104 * Returns 0 on success, negative error code on failure. 
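 *
 * Only blocks whose status.hang flag was set by
 * amdgpu_device_ip_check_soft_reset() are touched; callers bracket this
 * with the pre_soft_reset and post_soft_reset passes.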
3105 */ 3106 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev) 3107 { 3108 int i, r = 0; 3109 3110 for (i = 0; i < adev->num_ip_blocks; i++) { 3111 if (!adev->ip_blocks[i].status.valid) 3112 continue; 3113 if (adev->ip_blocks[i].status.hang && 3114 adev->ip_blocks[i].version->funcs->soft_reset) { 3115 r = adev->ip_blocks[i].version->funcs->soft_reset(adev); 3116 if (r) 3117 return r; 3118 } 3119 } 3120 3121 return 0; 3122 } 3123 3124 /** 3125 * amdgpu_device_ip_post_soft_reset - clean up from soft reset 3126 * 3127 * @adev: amdgpu_device pointer 3128 * 3129 * The list of all the hardware IPs that make up the asic is walked and the 3130 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset 3131 * handles any IP specific hardware or software state changes that are 3132 * necessary after the IP has been soft reset. 3133 * Returns 0 on success, negative error code on failure. 3134 */ 3135 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) 3136 { 3137 int i, r = 0; 3138 3139 for (i = 0; i < adev->num_ip_blocks; i++) { 3140 if (!adev->ip_blocks[i].status.valid) 3141 continue; 3142 if (adev->ip_blocks[i].status.hang && 3143 adev->ip_blocks[i].version->funcs->post_soft_reset) 3144 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); 3145 if (r) 3146 return r; 3147 } 3148 3149 return 0; 3150 } 3151 3152 /** 3153 * amdgpu_device_recover_vram - Recover some VRAM contents 3154 * 3155 * @adev: amdgpu_device pointer 3156 * 3157 * Restores the contents of VRAM buffers from the shadows in GTT. Used to 3158 * restore things like GPUVM page tables after a GPU reset where 3159 * the contents of VRAM might be lost. 3160 * 3161 * Returns: 3162 * 0 on success, negative error code on failure. 3163 */ 3164 static int amdgpu_device_recover_vram(struct amdgpu_device *adev) 3165 { 3166 struct dma_fence *fence = NULL, *next = NULL; 3167 struct amdgpu_bo *shadow; 3168 long r = 1, tmo; 3169 3170 if (amdgpu_sriov_runtime(adev)) 3171 tmo = msecs_to_jiffies(8000); 3172 else 3173 tmo = msecs_to_jiffies(100); 3174 3175 DRM_INFO("recover vram bo from shadow start\n"); 3176 mutex_lock(&adev->shadow_list_lock); 3177 list_for_each_entry(shadow, &adev->shadow_list, shadow_list) { 3178 3179 /* No need to recover an evicted BO */ 3180 if (shadow->tbo.mem.mem_type != TTM_PL_TT || 3181 shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET || 3182 shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM) 3183 continue; 3184 3185 r = amdgpu_bo_restore_shadow(shadow, &next); 3186 if (r) 3187 break; 3188 3189 if (fence) { 3190 tmo = dma_fence_wait_timeout(fence, false, tmo); 3191 dma_fence_put(fence); 3192 fence = next; 3193 if (tmo == 0) { 3194 r = -ETIMEDOUT; 3195 break; 3196 } else if (tmo < 0) { 3197 r = tmo; 3198 break; 3199 } 3200 } else { 3201 fence = next; 3202 } 3203 } 3204 mutex_unlock(&adev->shadow_list_lock); 3205 3206 if (fence) 3207 tmo = dma_fence_wait_timeout(fence, false, tmo); 3208 dma_fence_put(fence); 3209 3210 if (r < 0 || tmo <= 0) { 3211 DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); 3212 return -EIO; 3213 } 3214 3215 DRM_INFO("recover vram bo from shadow done\n"); 3216 return 0; 3217 } 3218 3219 3220 /** 3221 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf 3222 * 3223 * @adev: amdgpu device pointer 3224 * @from_hypervisor: request from hypervisor 3225 * 3226 * do VF FLR and reinitialize Asic 3227 * return 0 means succeeded otherwise failed 3228 */ 3229 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, 
3230 bool from_hypervisor) 3231 { 3232 int r; 3233 3234 if (from_hypervisor) 3235 r = amdgpu_virt_request_full_gpu(adev, true); 3236 else 3237 r = amdgpu_virt_reset_gpu(adev); 3238 if (r) 3239 return r; 3240 3241 amdgpu_amdkfd_pre_reset(adev); 3242 3243 /* Resume IP prior to SMC */ 3244 r = amdgpu_device_ip_reinit_early_sriov(adev); 3245 if (r) 3246 goto error; 3247 3248 /* we need recover gart prior to run SMC/CP/SDMA resume */ 3249 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]); 3250 3251 r = amdgpu_device_fw_loading(adev); 3252 if (r) 3253 return r; 3254 3255 /* now we are okay to resume SMC/CP/SDMA */ 3256 r = amdgpu_device_ip_reinit_late_sriov(adev); 3257 if (r) 3258 goto error; 3259 3260 amdgpu_irq_gpu_reset_resume_helper(adev); 3261 r = amdgpu_ib_ring_tests(adev); 3262 amdgpu_amdkfd_post_reset(adev); 3263 3264 error: 3265 amdgpu_virt_init_data_exchange(adev); 3266 amdgpu_virt_release_full_gpu(adev, true); 3267 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { 3268 atomic_inc(&adev->vram_lost_counter); 3269 r = amdgpu_device_recover_vram(adev); 3270 } 3271 3272 return r; 3273 } 3274 3275 /** 3276 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery 3277 * 3278 * @adev: amdgpu device pointer 3279 * 3280 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover 3281 * a hung GPU. 3282 */ 3283 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) 3284 { 3285 if (!amdgpu_device_ip_check_soft_reset(adev)) { 3286 DRM_INFO("Timeout, but no hardware hang detected.\n"); 3287 return false; 3288 } 3289 3290 if (amdgpu_gpu_recovery == 0) 3291 goto disabled; 3292 3293 if (amdgpu_sriov_vf(adev)) 3294 return true; 3295 3296 if (amdgpu_gpu_recovery == -1) { 3297 switch (adev->asic_type) { 3298 case CHIP_BONAIRE: 3299 case CHIP_HAWAII: 3300 case CHIP_TOPAZ: 3301 case CHIP_TONGA: 3302 case CHIP_FIJI: 3303 case CHIP_POLARIS10: 3304 case CHIP_POLARIS11: 3305 case CHIP_POLARIS12: 3306 case CHIP_VEGAM: 3307 case CHIP_VEGA20: 3308 case CHIP_VEGA10: 3309 case CHIP_VEGA12: 3310 break; 3311 default: 3312 goto disabled; 3313 } 3314 } 3315 3316 return true; 3317 3318 disabled: 3319 DRM_INFO("GPU recovery disabled.\n"); 3320 return false; 3321 } 3322 3323 3324 static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, 3325 struct amdgpu_job *job, 3326 bool *need_full_reset_arg) 3327 { 3328 int i, r = 0; 3329 bool need_full_reset = *need_full_reset_arg; 3330 3331 /* block all schedulers and reset given job's ring */ 3332 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 3333 struct amdgpu_ring *ring = adev->rings[i]; 3334 3335 if (!ring || !ring->sched.thread) 3336 continue; 3337 3338 drm_sched_stop(&ring->sched); 3339 3340 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ 3341 amdgpu_fence_driver_force_completion(ring); 3342 } 3343 3344 if(job) 3345 drm_sched_increase_karma(&job->base); 3346 3347 3348 3349 if (!amdgpu_sriov_vf(adev)) { 3350 3351 if (!need_full_reset) 3352 need_full_reset = amdgpu_device_ip_need_full_reset(adev); 3353 3354 if (!need_full_reset) { 3355 amdgpu_device_ip_pre_soft_reset(adev); 3356 r = amdgpu_device_ip_soft_reset(adev); 3357 amdgpu_device_ip_post_soft_reset(adev); 3358 if (r || amdgpu_device_ip_check_soft_reset(adev)) { 3359 DRM_INFO("soft reset failed, will fallback to full reset!\n"); 3360 need_full_reset = true; 3361 } 3362 } 3363 3364 if (need_full_reset) 3365 r = amdgpu_device_ip_suspend(adev); 3366 3367 *need_full_reset_arg = need_full_reset; 3368 } 3369 3370 return 
r;
}

static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
				struct list_head *device_list_handle,
				bool *need_full_reset_arg)
{
	struct amdgpu_device *tmp_adev = NULL;
	bool need_full_reset = *need_full_reset_arg, vram_lost = false;
	int r = 0;

	/*
	 * ASIC reset has to be done on all XGMI hive nodes ASAP
	 * to allow proper links negotiation in FW (within 1 sec)
	 */
	if (need_full_reset) {
		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
			/* For XGMI run all resets in parallel to speed up the process */
			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
				if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
					r = -EALREADY;
			} else
				r = amdgpu_asic_reset(tmp_adev);

			if (r) {
				DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
					  r, tmp_adev->ddev->unique);
				break;
			}
		}

		/* For XGMI wait for all PSP resets to complete before proceeding */
		if (!r) {
			list_for_each_entry(tmp_adev, device_list_handle,
					    gmc.xgmi.head) {
				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
					flush_work(&tmp_adev->xgmi_reset_work);
					r = tmp_adev->asic_reset_res;
					if (r)
						break;
				}
			}

			list_for_each_entry(tmp_adev, device_list_handle,
					    gmc.xgmi.head) {
				amdgpu_ras_reserve_bad_pages(tmp_adev);
			}
		}
	}

	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		if (need_full_reset) {
			/* post card */
			if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
				DRM_WARN("asic atom init failed!");

			if (!r) {
				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
				r = amdgpu_device_ip_resume_phase1(tmp_adev);
				if (r)
					goto out;

				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
				if (vram_lost) {
					DRM_INFO("VRAM is lost due to GPU reset!\n");
					atomic_inc(&tmp_adev->vram_lost_counter);
				}

				r = amdgpu_gtt_mgr_recover(
					&tmp_adev->mman.bdev.man[TTM_PL_TT]);
				if (r)
					goto out;

				r = amdgpu_device_fw_loading(tmp_adev);
				if (r)
					return r;

				r = amdgpu_device_ip_resume_phase2(tmp_adev);
				if (r)
					goto out;

				if (vram_lost)
					amdgpu_device_fill_reset_magic(tmp_adev);

				/* Update PSP FW topology after reset */
				if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
					r = amdgpu_xgmi_update_topology(hive, tmp_adev);
			}
		}

out:
		if (!r) {
			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
			r = amdgpu_ib_ring_tests(tmp_adev);
			if (r) {
				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
				r = amdgpu_device_ip_suspend(tmp_adev);
				need_full_reset = true;
				r = -EAGAIN;
				goto end;
			}
		}

		if (!r)
			r = amdgpu_device_recover_vram(tmp_adev);
		else
			tmp_adev->asic_reset_res = r;
	}

end:
	*need_full_reset_arg = need_full_reset;
	return r;
}

static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
					  struct amdgpu_job *job)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		if (!adev->asic_reset_res)
			drm_sched_resubmit_jobs(&ring->sched);

		drm_sched_start(&ring->sched, !adev->asic_reset_res);
	}

	if
 (!amdgpu_device_has_dc_support(adev)) {
		drm_helper_resume_force_mode(adev->ddev);
	}

	adev->asic_reset_res = 0;
}

static void amdgpu_device_lock_adev(struct amdgpu_device *adev)
{
	mutex_lock(&adev->lock_reset);
	atomic_inc(&adev->gpu_reset_counter);
	adev->in_gpu_reset = 1;
	/* Block kfd: SRIOV would do it separately */
	if (!amdgpu_sriov_vf(adev))
		amdgpu_amdkfd_pre_reset(adev);
}

static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
	/* unlock kfd: SRIOV would do it separately */
	if (!amdgpu_sriov_vf(adev))
		amdgpu_amdkfd_post_reset(adev);
	amdgpu_vf_error_trans_all(adev);
	adev->in_gpu_reset = 0;
	mutex_unlock(&adev->lock_reset);
}

/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu device pointer
 * @job: the job that triggered the hang, or NULL
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Attempts a soft reset or a full reset and reinitializes the asic.
 * Returns 0 for success or an error on failure.
 */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job)
{
	int r;
	struct amdgpu_hive_info *hive = NULL;
	bool need_full_reset = false;
	struct amdgpu_device *tmp_adev = NULL;
	struct list_head device_list, *device_list_handle = NULL;

	INIT_LIST_HEAD(&device_list);

	dev_info(adev->dev, "GPU reset begin!\n");

	/*
	 * In case of an XGMI hive, disallow concurrent resets being triggered
	 * by different nodes. There is also no point, since the node already
	 * executing the reset will reset all the other nodes in the hive.
	 */
	hive = amdgpu_get_xgmi_hive(adev, 0);
	if (hive && adev->gmc.xgmi.num_physical_nodes > 1 &&
	    !mutex_trylock(&hive->reset_lock))
		return 0;

	/* Start with adev pre asic reset first for soft reset check. */
	amdgpu_device_lock_adev(adev);
	r = amdgpu_device_pre_asic_reset(adev,
					 job,
					 &need_full_reset);
	if (r) {
		/* TODO Should we stop? */
		DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
			  r, adev->ddev->unique);
		adev->asic_reset_res = r;
	}

	/* Build list of devices to reset */
	if (need_full_reset && adev->gmc.xgmi.num_physical_nodes > 1) {
		if (!hive) {
			amdgpu_device_unlock_adev(adev);
			return -ENODEV;
		}

		/*
		 * In XGMI hive mode, the device reset is done for all nodes
		 * in the hive to retrain all XGMI links; hence the reset
		 * sequence is executed in a loop on all nodes.
		 */
		device_list_handle = &hive->device_list;
	} else {
		list_add_tail(&adev->gmc.xgmi.head, &device_list);
		device_list_handle = &device_list;
	}

retry:	/* Pre asic reset for the rest of the adevs in the XGMI hive.
	 */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {

		if (tmp_adev == adev)
			continue;

		amdgpu_device_lock_adev(tmp_adev);
		r = amdgpu_device_pre_asic_reset(tmp_adev,
						 NULL,
						 &need_full_reset);
		/* TODO Should we stop? */
		if (r) {
			DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
				  r, tmp_adev->ddev->unique);
			tmp_adev->asic_reset_res = r;
		}
	}

	/* Actual ASIC resets if needed. */
	/* TODO Implement XGMI hive reset logic for SRIOV */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_device_reset_sriov(adev, job ? false : true);
		if (r)
			adev->asic_reset_res = r;
	} else {
		r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
		if (r && r == -EAGAIN)
			goto retry;
	}

	/* Post ASIC reset for all devs. */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		amdgpu_device_post_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL);

		if (r) {
			/* bad news, how to tell it to userspace ? */
			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
		} else {
			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
		}

		amdgpu_device_unlock_adev(tmp_adev);
	}

	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
		mutex_unlock(&hive->reset_lock);

	if (r)
		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
	return r;
}

static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
						  enum pci_bus_speed *speed,
						  enum pcie_link_width *width)
{
	struct pci_dev *pdev = adev->pdev;
	enum pci_bus_speed cur_speed;
	enum pcie_link_width cur_width;
	u32 ret = 1;

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	while (pdev) {
		cur_speed = pcie_get_speed_cap(pdev);
		cur_width = pcie_get_width_cap(pdev);
		ret = pcie_bandwidth_available(adev->pdev, NULL,
					       NULL, &cur_width);
		if (!ret)
			cur_width = PCIE_LNK_WIDTH_RESRV;

		if (cur_speed != PCI_SPEED_UNKNOWN) {
			if (*speed == PCI_SPEED_UNKNOWN)
				*speed = cur_speed;
			else if (cur_speed < *speed)
				*speed = cur_speed;
		}

		if (cur_width != PCIE_LNK_WIDTH_UNKNOWN) {
			if (*width == PCIE_LNK_WIDTH_UNKNOWN)
				*width = cur_width;
			else if (cur_width < *width)
				*width = cur_width;
		}
		pdev = pci_upstream_bridge(pdev);
	}
}

/**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIE config space may not be available.
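 *
 * Note: amdgpu_pcie_gen_cap and amdgpu_pcie_lane_cap (the
 * amdgpu.pcie_gen_cap and amdgpu.pcie_lane_cap module parameters) pre-seed
 * the masks with caller-supplied CAIL_* bits and, when both are given,
 * skip the bus probing below entirely.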
3693 */ 3694 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) 3695 { 3696 struct pci_dev *pdev; 3697 enum pci_bus_speed speed_cap, platform_speed_cap; 3698 enum pcie_link_width platform_link_width; 3699 3700 if (amdgpu_pcie_gen_cap) 3701 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap; 3702 3703 if (amdgpu_pcie_lane_cap) 3704 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap; 3705 3706 /* covers APUs as well */ 3707 if (pci_is_root_bus(adev->pdev->bus)) { 3708 if (adev->pm.pcie_gen_mask == 0) 3709 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; 3710 if (adev->pm.pcie_mlw_mask == 0) 3711 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; 3712 return; 3713 } 3714 3715 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask) 3716 return; 3717 3718 amdgpu_device_get_min_pci_speed_width(adev, &platform_speed_cap, 3719 &platform_link_width); 3720 3721 if (adev->pm.pcie_gen_mask == 0) { 3722 /* asic caps */ 3723 pdev = adev->pdev; 3724 speed_cap = pcie_get_speed_cap(pdev); 3725 if (speed_cap == PCI_SPEED_UNKNOWN) { 3726 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 3727 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 3728 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); 3729 } else { 3730 if (speed_cap == PCIE_SPEED_16_0GT) 3731 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 3732 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 3733 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 | 3734 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4); 3735 else if (speed_cap == PCIE_SPEED_8_0GT) 3736 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 3737 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 3738 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); 3739 else if (speed_cap == PCIE_SPEED_5_0GT) 3740 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 3741 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2); 3742 else 3743 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1; 3744 } 3745 /* platform caps */ 3746 if (platform_speed_cap == PCI_SPEED_UNKNOWN) { 3747 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 3748 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2); 3749 } else { 3750 if (platform_speed_cap == PCIE_SPEED_16_0GT) 3751 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 3752 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | 3753 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 | 3754 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4); 3755 else if (platform_speed_cap == PCIE_SPEED_8_0GT) 3756 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 3757 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | 3758 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3); 3759 else if (platform_speed_cap == PCIE_SPEED_5_0GT) 3760 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 3761 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2); 3762 else 3763 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; 3764 3765 } 3766 } 3767 if (adev->pm.pcie_mlw_mask == 0) { 3768 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) { 3769 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK; 3770 } else { 3771 switch (platform_link_width) { 3772 case PCIE_LNK_X32: 3773 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | 3774 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 3775 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 3776 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 3777 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 3778 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3779 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3780 break; 3781 case PCIE_LNK_X16: 3782 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 3783 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 3784 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 
| 3785 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 3786 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3787 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3788 break; 3789 case PCIE_LNK_X12: 3790 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 3791 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 3792 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 3793 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3794 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3795 break; 3796 case PCIE_LNK_X8: 3797 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 3798 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 3799 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3800 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3801 break; 3802 case PCIE_LNK_X4: 3803 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 3804 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3805 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3806 break; 3807 case PCIE_LNK_X2: 3808 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3809 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3810 break; 3811 case PCIE_LNK_X1: 3812 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; 3813 break; 3814 default: 3815 break; 3816 } 3817 } 3818 } 3819 } 3820 3821