1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 */ 28 #include <linux/kthread.h> 29 #include <linux/console.h> 30 #include <linux/slab.h> 31 #include <linux/debugfs.h> 32 #include <drm/drmP.h> 33 #include <drm/drm_crtc_helper.h> 34 #include <drm/drm_atomic_helper.h> 35 #include <drm/amdgpu_drm.h> 36 #include <linux/vgaarb.h> 37 #include <linux/vga_switcheroo.h> 38 #include <linux/efi.h> 39 #include "amdgpu.h" 40 #include "amdgpu_trace.h" 41 #include "amdgpu_i2c.h" 42 #include "atom.h" 43 #include "amdgpu_atombios.h" 44 #include "amdgpu_atomfirmware.h" 45 #include "amd_pcie.h" 46 #ifdef CONFIG_DRM_AMDGPU_SI 47 #include "si.h" 48 #endif 49 #ifdef CONFIG_DRM_AMDGPU_CIK 50 #include "cik.h" 51 #endif 52 #include "vi.h" 53 #include "soc15.h" 54 #include "bif/bif_4_1_d.h" 55 #include <linux/pci.h> 56 #include <linux/firmware.h> 57 #include "amdgpu_vf_error.h" 58 59 #include "amdgpu_amdkfd.h" 60 61 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); 62 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); 63 64 #define AMDGPU_RESUME_MS 2000 65 66 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); 67 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); 68 static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev); 69 static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev); 70 71 static const char *amdgpu_asic_name[] = { 72 "TAHITI", 73 "PITCAIRN", 74 "VERDE", 75 "OLAND", 76 "HAINAN", 77 "BONAIRE", 78 "KAVERI", 79 "KABINI", 80 "HAWAII", 81 "MULLINS", 82 "TOPAZ", 83 "TONGA", 84 "FIJI", 85 "CARRIZO", 86 "STONEY", 87 "POLARIS10", 88 "POLARIS11", 89 "POLARIS12", 90 "VEGA10", 91 "RAVEN", 92 "LAST", 93 }; 94 95 bool amdgpu_device_is_px(struct drm_device *dev) 96 { 97 struct amdgpu_device *adev = dev->dev_private; 98 99 if (adev->flags & AMD_IS_PX) 100 return true; 101 return false; 102 } 103 104 /* 105 * MMIO register access helper functions. 
106 */ 107 uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, 108 uint32_t acc_flags) 109 { 110 uint32_t ret; 111 112 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) { 113 BUG_ON(in_interrupt()); 114 return amdgpu_virt_kiq_rreg(adev, reg); 115 } 116 117 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) 118 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); 119 else { 120 unsigned long flags; 121 122 spin_lock_irqsave(&adev->mmio_idx_lock, flags); 123 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); 124 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); 125 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); 126 } 127 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret); 128 return ret; 129 } 130 131 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, 132 uint32_t acc_flags) 133 { 134 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); 135 136 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { 137 adev->last_mm_index = v; 138 } 139 140 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) { 141 BUG_ON(in_interrupt()); 142 return amdgpu_virt_kiq_wreg(adev, reg, v); 143 } 144 145 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) 146 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); 147 else { 148 unsigned long flags; 149 150 spin_lock_irqsave(&adev->mmio_idx_lock, flags); 151 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); 152 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); 153 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); 154 } 155 156 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { 157 udelay(500); 158 } 159 } 160 161 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) 162 { 163 if ((reg * 4) < adev->rio_mem_size) 164 return ioread32(adev->rio_mem + (reg * 4)); 165 else { 166 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); 167 return ioread32(adev->rio_mem + (mmMM_DATA * 4)); 168 } 169 } 170 171 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) 172 { 173 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { 174 adev->last_mm_index = v; 175 } 176 177 if ((reg * 4) < adev->rio_mem_size) 178 iowrite32(v, adev->rio_mem + (reg * 4)); 179 else { 180 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); 181 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4)); 182 } 183 184 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { 185 udelay(500); 186 } 187 } 188 189 /** 190 * amdgpu_mm_rdoorbell - read a doorbell dword 191 * 192 * @adev: amdgpu_device pointer 193 * @index: doorbell index 194 * 195 * Returns the value in the doorbell aperture at the 196 * requested doorbell index (CIK). 197 */ 198 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) 199 { 200 if (index < adev->doorbell.num_doorbells) { 201 return readl(adev->doorbell.ptr + index); 202 } else { 203 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); 204 return 0; 205 } 206 } 207 208 /** 209 * amdgpu_mm_wdoorbell - write a doorbell dword 210 * 211 * @adev: amdgpu_device pointer 212 * @index: doorbell index 213 * @v: value to write 214 * 215 * Writes @v to the doorbell aperture at the 216 * requested doorbell index (CIK). 
217 */ 218 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v) 219 { 220 if (index < adev->doorbell.num_doorbells) { 221 writel(v, adev->doorbell.ptr + index); 222 } else { 223 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); 224 } 225 } 226 227 /** 228 * amdgpu_mm_rdoorbell64 - read a doorbell Qword 229 * 230 * @adev: amdgpu_device pointer 231 * @index: doorbell index 232 * 233 * Returns the value in the doorbell aperture at the 234 * requested doorbell index (VEGA10+). 235 */ 236 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index) 237 { 238 if (index < adev->doorbell.num_doorbells) { 239 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index)); 240 } else { 241 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); 242 return 0; 243 } 244 } 245 246 /** 247 * amdgpu_mm_wdoorbell64 - write a doorbell Qword 248 * 249 * @adev: amdgpu_device pointer 250 * @index: doorbell index 251 * @v: value to write 252 * 253 * Writes @v to the doorbell aperture at the 254 * requested doorbell index (VEGA10+). 255 */ 256 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v) 257 { 258 if (index < adev->doorbell.num_doorbells) { 259 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v); 260 } else { 261 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); 262 } 263 } 264 265 /** 266 * amdgpu_invalid_rreg - dummy reg read function 267 * 268 * @adev: amdgpu device pointer 269 * @reg: offset of register 270 * 271 * Dummy register read function. Used for register blocks 272 * that certain asics don't have (all asics). 273 * Returns the value in the register. 274 */ 275 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg) 276 { 277 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg); 278 BUG(); 279 return 0; 280 } 281 282 /** 283 * amdgpu_invalid_wreg - dummy reg write function 284 * 285 * @adev: amdgpu device pointer 286 * @reg: offset of register 287 * @v: value to write to the register 288 * 289 * Dummy register read function. Used for register blocks 290 * that certain asics don't have (all asics). 291 */ 292 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) 293 { 294 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n", 295 reg, v); 296 BUG(); 297 } 298 299 /** 300 * amdgpu_block_invalid_rreg - dummy reg read function 301 * 302 * @adev: amdgpu device pointer 303 * @block: offset of instance 304 * @reg: offset of register 305 * 306 * Dummy register read function. Used for register blocks 307 * that certain asics don't have (all asics). 308 * Returns the value in the register. 309 */ 310 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev, 311 uint32_t block, uint32_t reg) 312 { 313 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n", 314 reg, block); 315 BUG(); 316 return 0; 317 } 318 319 /** 320 * amdgpu_block_invalid_wreg - dummy reg write function 321 * 322 * @adev: amdgpu device pointer 323 * @block: offset of instance 324 * @reg: offset of register 325 * @v: value to write to the register 326 * 327 * Dummy register read function. Used for register blocks 328 * that certain asics don't have (all asics). 
329 */ 330 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, 331 uint32_t block, 332 uint32_t reg, uint32_t v) 333 { 334 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n", 335 reg, block, v); 336 BUG(); 337 } 338 339 static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) 340 { 341 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, 342 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 343 &adev->vram_scratch.robj, 344 &adev->vram_scratch.gpu_addr, 345 (void **)&adev->vram_scratch.ptr); 346 } 347 348 static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev) 349 { 350 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL); 351 } 352 353 /** 354 * amdgpu_program_register_sequence - program an array of registers. 355 * 356 * @adev: amdgpu_device pointer 357 * @registers: pointer to the register array 358 * @array_size: size of the register array 359 * 360 * Programs an array or registers with and and or masks. 361 * This is a helper for setting golden registers. 362 */ 363 void amdgpu_program_register_sequence(struct amdgpu_device *adev, 364 const u32 *registers, 365 const u32 array_size) 366 { 367 u32 tmp, reg, and_mask, or_mask; 368 int i; 369 370 if (array_size % 3) 371 return; 372 373 for (i = 0; i < array_size; i +=3) { 374 reg = registers[i + 0]; 375 and_mask = registers[i + 1]; 376 or_mask = registers[i + 2]; 377 378 if (and_mask == 0xffffffff) { 379 tmp = or_mask; 380 } else { 381 tmp = RREG32(reg); 382 tmp &= ~and_mask; 383 tmp |= or_mask; 384 } 385 WREG32(reg, tmp); 386 } 387 } 388 389 void amdgpu_pci_config_reset(struct amdgpu_device *adev) 390 { 391 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA); 392 } 393 394 /* 395 * GPU doorbell aperture helpers function. 396 */ 397 /** 398 * amdgpu_doorbell_init - Init doorbell driver information. 399 * 400 * @adev: amdgpu_device pointer 401 * 402 * Init doorbell driver information (CIK) 403 * Returns 0 on success, error on failure. 404 */ 405 static int amdgpu_doorbell_init(struct amdgpu_device *adev) 406 { 407 /* No doorbell on SI hardware generation */ 408 if (adev->asic_type < CHIP_BONAIRE) { 409 adev->doorbell.base = 0; 410 adev->doorbell.size = 0; 411 adev->doorbell.num_doorbells = 0; 412 adev->doorbell.ptr = NULL; 413 return 0; 414 } 415 416 /* doorbell bar mapping */ 417 adev->doorbell.base = pci_resource_start(adev->pdev, 2); 418 adev->doorbell.size = pci_resource_len(adev->pdev, 2); 419 420 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32), 421 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1); 422 if (adev->doorbell.num_doorbells == 0) 423 return -EINVAL; 424 425 adev->doorbell.ptr = ioremap(adev->doorbell.base, 426 adev->doorbell.num_doorbells * 427 sizeof(u32)); 428 if (adev->doorbell.ptr == NULL) 429 return -ENOMEM; 430 431 return 0; 432 } 433 434 /** 435 * amdgpu_doorbell_fini - Tear down doorbell driver information. 436 * 437 * @adev: amdgpu_device pointer 438 * 439 * Tear down doorbell driver information (CIK) 440 */ 441 static void amdgpu_doorbell_fini(struct amdgpu_device *adev) 442 { 443 iounmap(adev->doorbell.ptr); 444 adev->doorbell.ptr = NULL; 445 } 446 447 /** 448 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to 449 * setup amdkfd 450 * 451 * @adev: amdgpu_device pointer 452 * @aperture_base: output returning doorbell aperture base physical address 453 * @aperture_size: output returning doorbell aperture size in bytes 454 * @start_offset: output returning # of doorbell bytes reserved for amdgpu. 
455 * 456 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up, 457 * takes doorbells required for its own rings and reports the setup to amdkfd. 458 * amdgpu reserved doorbells are at the start of the doorbell aperture. 459 */ 460 void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, 461 phys_addr_t *aperture_base, 462 size_t *aperture_size, 463 size_t *start_offset) 464 { 465 /* 466 * The first num_doorbells are used by amdgpu. 467 * amdkfd takes whatever's left in the aperture. 468 */ 469 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) { 470 *aperture_base = adev->doorbell.base; 471 *aperture_size = adev->doorbell.size; 472 *start_offset = adev->doorbell.num_doorbells * sizeof(u32); 473 } else { 474 *aperture_base = 0; 475 *aperture_size = 0; 476 *start_offset = 0; 477 } 478 } 479 480 /* 481 * amdgpu_wb_*() 482 * Writeback is the method by which the GPU updates special pages in memory 483 * with the status of certain GPU events (fences, ring pointers,etc.). 484 */ 485 486 /** 487 * amdgpu_wb_fini - Disable Writeback and free memory 488 * 489 * @adev: amdgpu_device pointer 490 * 491 * Disables Writeback and frees the Writeback memory (all asics). 492 * Used at driver shutdown. 493 */ 494 static void amdgpu_wb_fini(struct amdgpu_device *adev) 495 { 496 if (adev->wb.wb_obj) { 497 amdgpu_bo_free_kernel(&adev->wb.wb_obj, 498 &adev->wb.gpu_addr, 499 (void **)&adev->wb.wb); 500 adev->wb.wb_obj = NULL; 501 } 502 } 503 504 /** 505 * amdgpu_wb_init- Init Writeback driver info and allocate memory 506 * 507 * @adev: amdgpu_device pointer 508 * 509 * Initializes writeback and allocates writeback memory (all asics). 510 * Used at driver startup. 511 * Returns 0 on success or an -error on failure. 512 */ 513 static int amdgpu_wb_init(struct amdgpu_device *adev) 514 { 515 int r; 516 517 if (adev->wb.wb_obj == NULL) { 518 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */ 519 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8, 520 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 521 &adev->wb.wb_obj, &adev->wb.gpu_addr, 522 (void **)&adev->wb.wb); 523 if (r) { 524 dev_warn(adev->dev, "(%d) create WB bo failed\n", r); 525 return r; 526 } 527 528 adev->wb.num_wb = AMDGPU_MAX_WB; 529 memset(&adev->wb.used, 0, sizeof(adev->wb.used)); 530 531 /* clear wb memory */ 532 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t)); 533 } 534 535 return 0; 536 } 537 538 /** 539 * amdgpu_wb_get - Allocate a wb entry 540 * 541 * @adev: amdgpu_device pointer 542 * @wb: wb index 543 * 544 * Allocate a wb slot for use by the driver (all asics). 545 * Returns 0 on success or -EINVAL on failure. 
546 */ 547 int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb) 548 { 549 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); 550 551 if (offset < adev->wb.num_wb) { 552 __set_bit(offset, adev->wb.used); 553 *wb = offset * 8; /* convert to dw offset */ 554 return 0; 555 } else { 556 return -EINVAL; 557 } 558 } 559 560 /** 561 * amdgpu_wb_free - Free a wb entry 562 * 563 * @adev: amdgpu_device pointer 564 * @wb: wb index 565 * 566 * Free a wb slot allocated for use by the driver (all asics) 567 */ 568 void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb) 569 { 570 if (wb < adev->wb.num_wb) 571 __clear_bit(wb, adev->wb.used); 572 } 573 574 /** 575 * amdgpu_vram_location - try to find VRAM location 576 * @adev: amdgpu device structure holding all necessary informations 577 * @mc: memory controller structure holding memory informations 578 * @base: base address at which to put VRAM 579 * 580 * Function will try to place VRAM at base address provided 581 * as parameter (which is so far either PCI aperture address or 582 * for IGP TOM base address). 583 * 584 * If there is not enough space to fit the unvisible VRAM in the 32bits 585 * address space then we limit the VRAM size to the aperture. 586 * 587 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size, 588 * this shouldn't be a problem as we are using the PCI aperture as a reference. 589 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but 590 * not IGP. 591 * 592 * Note: we use mc_vram_size as on some board we need to program the mc to 593 * cover the whole aperture even if VRAM size is inferior to aperture size 594 * Novell bug 204882 + along with lots of ubuntu ones 595 * 596 * Note: when limiting vram it's safe to overwritte real_vram_size because 597 * we are not in case where real_vram_size is inferior to mc_vram_size (ie 598 * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu 599 * ones) 600 * 601 * Note: IGP TOM addr should be the same as the aperture addr, we don't 602 * explicitly check for that though. 603 * 604 * FIXME: when reducing VRAM size align new size on power of 2. 605 */ 606 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base) 607 { 608 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20; 609 610 mc->vram_start = base; 611 if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) { 612 dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n"); 613 mc->real_vram_size = mc->aper_size; 614 mc->mc_vram_size = mc->aper_size; 615 } 616 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 617 if (limit && limit < mc->real_vram_size) 618 mc->real_vram_size = limit; 619 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", 620 mc->mc_vram_size >> 20, mc->vram_start, 621 mc->vram_end, mc->real_vram_size >> 20); 622 } 623 624 /** 625 * amdgpu_gart_location - try to find GTT location 626 * @adev: amdgpu device structure holding all necessary informations 627 * @mc: memory controller structure holding memory informations 628 * 629 * Function will place try to place GTT before or after VRAM. 630 * 631 * If GTT size is bigger than space left then we ajust GTT size. 632 * Thus function will never fails. 633 * 634 * FIXME: when reducing GTT size align new size on power of 2. 
635 */ 636 void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) 637 { 638 u64 size_af, size_bf; 639 640 size_af = adev->mc.mc_mask - mc->vram_end; 641 size_bf = mc->vram_start; 642 if (size_bf > size_af) { 643 if (mc->gart_size > size_bf) { 644 dev_warn(adev->dev, "limiting GTT\n"); 645 mc->gart_size = size_bf; 646 } 647 mc->gart_start = 0; 648 } else { 649 if (mc->gart_size > size_af) { 650 dev_warn(adev->dev, "limiting GTT\n"); 651 mc->gart_size = size_af; 652 } 653 mc->gart_start = mc->vram_end + 1; 654 } 655 mc->gart_end = mc->gart_start + mc->gart_size - 1; 656 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n", 657 mc->gart_size >> 20, mc->gart_start, mc->gart_end); 658 } 659 660 /* 661 * GPU helpers function. 662 */ 663 /** 664 * amdgpu_need_post - check if the hw need post or not 665 * 666 * @adev: amdgpu_device pointer 667 * 668 * Check if the asic has been initialized (all asics) at driver startup 669 * or post is needed if hw reset is performed. 670 * Returns true if need or false if not. 671 */ 672 bool amdgpu_need_post(struct amdgpu_device *adev) 673 { 674 uint32_t reg; 675 676 if (adev->has_hw_reset) { 677 adev->has_hw_reset = false; 678 return true; 679 } 680 681 /* bios scratch used on CIK+ */ 682 if (adev->asic_type >= CHIP_BONAIRE) 683 return amdgpu_atombios_scratch_need_asic_init(adev); 684 685 /* check MEM_SIZE for older asics */ 686 reg = amdgpu_asic_get_config_memsize(adev); 687 688 if ((reg != 0) && (reg != 0xffffffff)) 689 return false; 690 691 return true; 692 693 } 694 695 static bool amdgpu_vpost_needed(struct amdgpu_device *adev) 696 { 697 if (amdgpu_sriov_vf(adev)) 698 return false; 699 700 if (amdgpu_passthrough(adev)) { 701 /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot 702 * some old smc fw still need driver do vPost otherwise gpu hang, while 703 * those smc fw version above 22.15 doesn't have this flaw, so we force 704 * vpost executed for smc version below 22.15 705 */ 706 if (adev->asic_type == CHIP_FIJI) { 707 int err; 708 uint32_t fw_ver; 709 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev); 710 /* force vPost if error occured */ 711 if (err) 712 return true; 713 714 fw_ver = *((uint32_t *)adev->pm.fw->data + 69); 715 if (fw_ver < 0x00160e00) 716 return true; 717 } 718 } 719 return amdgpu_need_post(adev); 720 } 721 722 /** 723 * amdgpu_dummy_page_init - init dummy page used by the driver 724 * 725 * @adev: amdgpu_device pointer 726 * 727 * Allocate the dummy page used by the driver (all asics). 728 * This dummy page is used by the driver as a filler for gart entries 729 * when pages are taken out of the GART 730 * Returns 0 on sucess, -ENOMEM on failure. 731 */ 732 int amdgpu_dummy_page_init(struct amdgpu_device *adev) 733 { 734 if (adev->dummy_page.page) 735 return 0; 736 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); 737 if (adev->dummy_page.page == NULL) 738 return -ENOMEM; 739 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page, 740 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 741 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) { 742 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n"); 743 __free_page(adev->dummy_page.page); 744 adev->dummy_page.page = NULL; 745 return -ENOMEM; 746 } 747 return 0; 748 } 749 750 /** 751 * amdgpu_dummy_page_fini - free dummy page used by the driver 752 * 753 * @adev: amdgpu_device pointer 754 * 755 * Frees the dummy page used by the driver (all asics). 
756 */ 757 void amdgpu_dummy_page_fini(struct amdgpu_device *adev) 758 { 759 if (adev->dummy_page.page == NULL) 760 return; 761 pci_unmap_page(adev->pdev, adev->dummy_page.addr, 762 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 763 __free_page(adev->dummy_page.page); 764 adev->dummy_page.page = NULL; 765 } 766 767 768 /* ATOM accessor methods */ 769 /* 770 * ATOM is an interpreted byte code stored in tables in the vbios. The 771 * driver registers callbacks to access registers and the interpreter 772 * in the driver parses the tables and executes then to program specific 773 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c, 774 * atombios.h, and atom.c 775 */ 776 777 /** 778 * cail_pll_read - read PLL register 779 * 780 * @info: atom card_info pointer 781 * @reg: PLL register offset 782 * 783 * Provides a PLL register accessor for the atom interpreter (r4xx+). 784 * Returns the value of the PLL register. 785 */ 786 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) 787 { 788 return 0; 789 } 790 791 /** 792 * cail_pll_write - write PLL register 793 * 794 * @info: atom card_info pointer 795 * @reg: PLL register offset 796 * @val: value to write to the pll register 797 * 798 * Provides a PLL register accessor for the atom interpreter (r4xx+). 799 */ 800 static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val) 801 { 802 803 } 804 805 /** 806 * cail_mc_read - read MC (Memory Controller) register 807 * 808 * @info: atom card_info pointer 809 * @reg: MC register offset 810 * 811 * Provides an MC register accessor for the atom interpreter (r4xx+). 812 * Returns the value of the MC register. 813 */ 814 static uint32_t cail_mc_read(struct card_info *info, uint32_t reg) 815 { 816 return 0; 817 } 818 819 /** 820 * cail_mc_write - write MC (Memory Controller) register 821 * 822 * @info: atom card_info pointer 823 * @reg: MC register offset 824 * @val: value to write to the pll register 825 * 826 * Provides a MC register accessor for the atom interpreter (r4xx+). 827 */ 828 static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val) 829 { 830 831 } 832 833 /** 834 * cail_reg_write - write MMIO register 835 * 836 * @info: atom card_info pointer 837 * @reg: MMIO register offset 838 * @val: value to write to the pll register 839 * 840 * Provides a MMIO register accessor for the atom interpreter (r4xx+). 841 */ 842 static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val) 843 { 844 struct amdgpu_device *adev = info->dev->dev_private; 845 846 WREG32(reg, val); 847 } 848 849 /** 850 * cail_reg_read - read MMIO register 851 * 852 * @info: atom card_info pointer 853 * @reg: MMIO register offset 854 * 855 * Provides an MMIO register accessor for the atom interpreter (r4xx+). 856 * Returns the value of the MMIO register. 857 */ 858 static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) 859 { 860 struct amdgpu_device *adev = info->dev->dev_private; 861 uint32_t r; 862 863 r = RREG32(reg); 864 return r; 865 } 866 867 /** 868 * cail_ioreg_write - write IO register 869 * 870 * @info: atom card_info pointer 871 * @reg: IO register offset 872 * @val: value to write to the pll register 873 * 874 * Provides a IO register accessor for the atom interpreter (r4xx+). 
875 */ 876 static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val) 877 { 878 struct amdgpu_device *adev = info->dev->dev_private; 879 880 WREG32_IO(reg, val); 881 } 882 883 /** 884 * cail_ioreg_read - read IO register 885 * 886 * @info: atom card_info pointer 887 * @reg: IO register offset 888 * 889 * Provides an IO register accessor for the atom interpreter (r4xx+). 890 * Returns the value of the IO register. 891 */ 892 static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) 893 { 894 struct amdgpu_device *adev = info->dev->dev_private; 895 uint32_t r; 896 897 r = RREG32_IO(reg); 898 return r; 899 } 900 901 static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev, 902 struct device_attribute *attr, 903 char *buf) 904 { 905 struct drm_device *ddev = dev_get_drvdata(dev); 906 struct amdgpu_device *adev = ddev->dev_private; 907 struct atom_context *ctx = adev->mode_info.atom_context; 908 909 return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version); 910 } 911 912 static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version, 913 NULL); 914 915 /** 916 * amdgpu_atombios_fini - free the driver info and callbacks for atombios 917 * 918 * @adev: amdgpu_device pointer 919 * 920 * Frees the driver info and register access callbacks for the ATOM 921 * interpreter (r4xx+). 922 * Called at driver shutdown. 923 */ 924 static void amdgpu_atombios_fini(struct amdgpu_device *adev) 925 { 926 if (adev->mode_info.atom_context) { 927 kfree(adev->mode_info.atom_context->scratch); 928 kfree(adev->mode_info.atom_context->iio); 929 } 930 kfree(adev->mode_info.atom_context); 931 adev->mode_info.atom_context = NULL; 932 kfree(adev->mode_info.atom_card_info); 933 adev->mode_info.atom_card_info = NULL; 934 device_remove_file(adev->dev, &dev_attr_vbios_version); 935 } 936 937 /** 938 * amdgpu_atombios_init - init the driver info and callbacks for atombios 939 * 940 * @adev: amdgpu_device pointer 941 * 942 * Initializes the driver info and register access callbacks for the 943 * ATOM interpreter (r4xx+). 944 * Returns 0 on sucess, -ENOMEM on failure. 945 * Called at driver startup. 946 */ 947 static int amdgpu_atombios_init(struct amdgpu_device *adev) 948 { 949 struct card_info *atom_card_info = 950 kzalloc(sizeof(struct card_info), GFP_KERNEL); 951 int ret; 952 953 if (!atom_card_info) 954 return -ENOMEM; 955 956 adev->mode_info.atom_card_info = atom_card_info; 957 atom_card_info->dev = adev->ddev; 958 atom_card_info->reg_read = cail_reg_read; 959 atom_card_info->reg_write = cail_reg_write; 960 /* needed for iio ops */ 961 if (adev->rio_mem) { 962 atom_card_info->ioreg_read = cail_ioreg_read; 963 atom_card_info->ioreg_write = cail_ioreg_write; 964 } else { 965 DRM_INFO("PCI I/O BAR is not found. 
Using MMIO to access ATOM BIOS\n"); 966 atom_card_info->ioreg_read = cail_reg_read; 967 atom_card_info->ioreg_write = cail_reg_write; 968 } 969 atom_card_info->mc_read = cail_mc_read; 970 atom_card_info->mc_write = cail_mc_write; 971 atom_card_info->pll_read = cail_pll_read; 972 atom_card_info->pll_write = cail_pll_write; 973 974 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios); 975 if (!adev->mode_info.atom_context) { 976 amdgpu_atombios_fini(adev); 977 return -ENOMEM; 978 } 979 980 mutex_init(&adev->mode_info.atom_context->mutex); 981 if (adev->is_atom_fw) { 982 amdgpu_atomfirmware_scratch_regs_init(adev); 983 amdgpu_atomfirmware_allocate_fb_scratch(adev); 984 } else { 985 amdgpu_atombios_scratch_regs_init(adev); 986 amdgpu_atombios_allocate_fb_scratch(adev); 987 } 988 989 ret = device_create_file(adev->dev, &dev_attr_vbios_version); 990 if (ret) { 991 DRM_ERROR("Failed to create device file for VBIOS version\n"); 992 return ret; 993 } 994 995 return 0; 996 } 997 998 /* if we get transitioned to only one device, take VGA back */ 999 /** 1000 * amdgpu_vga_set_decode - enable/disable vga decode 1001 * 1002 * @cookie: amdgpu_device pointer 1003 * @state: enable/disable vga decode 1004 * 1005 * Enable/disable vga decode (all asics). 1006 * Returns VGA resource flags. 1007 */ 1008 static unsigned int amdgpu_vga_set_decode(void *cookie, bool state) 1009 { 1010 struct amdgpu_device *adev = cookie; 1011 amdgpu_asic_set_vga_state(adev, state); 1012 if (state) 1013 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 1014 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 1015 else 1016 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 1017 } 1018 1019 static void amdgpu_check_block_size(struct amdgpu_device *adev) 1020 { 1021 /* defines number of bits in page table versus page directory, 1022 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the 1023 * page table and the remaining bits are in the page directory */ 1024 if (amdgpu_vm_block_size == -1) 1025 return; 1026 1027 if (amdgpu_vm_block_size < 9) { 1028 dev_warn(adev->dev, "VM page table size (%d) too small\n", 1029 amdgpu_vm_block_size); 1030 goto def_value; 1031 } 1032 1033 if (amdgpu_vm_block_size > 24 || 1034 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) { 1035 dev_warn(adev->dev, "VM page table size (%d) too large\n", 1036 amdgpu_vm_block_size); 1037 goto def_value; 1038 } 1039 1040 return; 1041 1042 def_value: 1043 amdgpu_vm_block_size = -1; 1044 } 1045 1046 static void amdgpu_check_vm_size(struct amdgpu_device *adev) 1047 { 1048 /* no need to check the default value */ 1049 if (amdgpu_vm_size == -1) 1050 return; 1051 1052 if (!is_power_of_2(amdgpu_vm_size)) { 1053 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n", 1054 amdgpu_vm_size); 1055 goto def_value; 1056 } 1057 1058 if (amdgpu_vm_size < 1) { 1059 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n", 1060 amdgpu_vm_size); 1061 goto def_value; 1062 } 1063 1064 /* 1065 * Max GPUVM size for Cayman, SI, CI VI are 40 bits. 1066 */ 1067 if (amdgpu_vm_size > 1024) { 1068 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n", 1069 amdgpu_vm_size); 1070 goto def_value; 1071 } 1072 1073 return; 1074 1075 def_value: 1076 amdgpu_vm_size = -1; 1077 } 1078 1079 /** 1080 * amdgpu_check_arguments - validate module params 1081 * 1082 * @adev: amdgpu_device pointer 1083 * 1084 * Validates certain module parameters and updates 1085 * the associated values used by the driver (all asics). 
1086 */ 1087 static void amdgpu_check_arguments(struct amdgpu_device *adev) 1088 { 1089 if (amdgpu_sched_jobs < 4) { 1090 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", 1091 amdgpu_sched_jobs); 1092 amdgpu_sched_jobs = 4; 1093 } else if (!is_power_of_2(amdgpu_sched_jobs)){ 1094 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n", 1095 amdgpu_sched_jobs); 1096 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); 1097 } 1098 1099 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) { 1100 /* gart size must be greater or equal to 32M */ 1101 dev_warn(adev->dev, "gart size (%d) too small\n", 1102 amdgpu_gart_size); 1103 amdgpu_gart_size = -1; 1104 } 1105 1106 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { 1107 /* gtt size must be greater or equal to 32M */ 1108 dev_warn(adev->dev, "gtt size (%d) too small\n", 1109 amdgpu_gtt_size); 1110 amdgpu_gtt_size = -1; 1111 } 1112 1113 /* valid range is between 4 and 9 inclusive */ 1114 if (amdgpu_vm_fragment_size != -1 && 1115 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) { 1116 dev_warn(adev->dev, "valid range is between 4 and 9\n"); 1117 amdgpu_vm_fragment_size = -1; 1118 } 1119 1120 amdgpu_check_vm_size(adev); 1121 1122 amdgpu_check_block_size(adev); 1123 1124 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 || 1125 !is_power_of_2(amdgpu_vram_page_split))) { 1126 dev_warn(adev->dev, "invalid VRAM page split (%d)\n", 1127 amdgpu_vram_page_split); 1128 amdgpu_vram_page_split = 1024; 1129 } 1130 } 1131 1132 /** 1133 * amdgpu_switcheroo_set_state - set switcheroo state 1134 * 1135 * @pdev: pci dev pointer 1136 * @state: vga_switcheroo state 1137 * 1138 * Callback for the switcheroo driver. Suspends or resumes the 1139 * the asics before or after it is powered up using ACPI methods. 1140 */ 1141 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) 1142 { 1143 struct drm_device *dev = pci_get_drvdata(pdev); 1144 1145 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF) 1146 return; 1147 1148 if (state == VGA_SWITCHEROO_ON) { 1149 pr_info("amdgpu: switched on\n"); 1150 /* don't suspend or resume card normally */ 1151 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1152 1153 amdgpu_device_resume(dev, true, true); 1154 1155 dev->switch_power_state = DRM_SWITCH_POWER_ON; 1156 drm_kms_helper_poll_enable(dev); 1157 } else { 1158 pr_info("amdgpu: switched off\n"); 1159 drm_kms_helper_poll_disable(dev); 1160 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1161 amdgpu_device_suspend(dev, true, true); 1162 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 1163 } 1164 } 1165 1166 /** 1167 * amdgpu_switcheroo_can_switch - see if switcheroo state can change 1168 * 1169 * @pdev: pci dev pointer 1170 * 1171 * Callback for the switcheroo driver. Check of the switcheroo 1172 * state can be changed. 1173 * Returns true if the state can be changed, false if not. 1174 */ 1175 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev) 1176 { 1177 struct drm_device *dev = pci_get_drvdata(pdev); 1178 1179 /* 1180 * FIXME: open_count is protected by drm_global_mutex but that would lead to 1181 * locking inversion with the driver load path. And the access here is 1182 * completely racy anyway. So don't bother with locking for now. 
1183 */ 1184 return dev->open_count == 0; 1185 } 1186 1187 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = { 1188 .set_gpu_state = amdgpu_switcheroo_set_state, 1189 .reprobe = NULL, 1190 .can_switch = amdgpu_switcheroo_can_switch, 1191 }; 1192 1193 int amdgpu_set_clockgating_state(struct amdgpu_device *adev, 1194 enum amd_ip_block_type block_type, 1195 enum amd_clockgating_state state) 1196 { 1197 int i, r = 0; 1198 1199 for (i = 0; i < adev->num_ip_blocks; i++) { 1200 if (!adev->ip_blocks[i].status.valid) 1201 continue; 1202 if (adev->ip_blocks[i].version->type != block_type) 1203 continue; 1204 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state) 1205 continue; 1206 r = adev->ip_blocks[i].version->funcs->set_clockgating_state( 1207 (void *)adev, state); 1208 if (r) 1209 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n", 1210 adev->ip_blocks[i].version->funcs->name, r); 1211 } 1212 return r; 1213 } 1214 1215 int amdgpu_set_powergating_state(struct amdgpu_device *adev, 1216 enum amd_ip_block_type block_type, 1217 enum amd_powergating_state state) 1218 { 1219 int i, r = 0; 1220 1221 for (i = 0; i < adev->num_ip_blocks; i++) { 1222 if (!adev->ip_blocks[i].status.valid) 1223 continue; 1224 if (adev->ip_blocks[i].version->type != block_type) 1225 continue; 1226 if (!adev->ip_blocks[i].version->funcs->set_powergating_state) 1227 continue; 1228 r = adev->ip_blocks[i].version->funcs->set_powergating_state( 1229 (void *)adev, state); 1230 if (r) 1231 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n", 1232 adev->ip_blocks[i].version->funcs->name, r); 1233 } 1234 return r; 1235 } 1236 1237 void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags) 1238 { 1239 int i; 1240 1241 for (i = 0; i < adev->num_ip_blocks; i++) { 1242 if (!adev->ip_blocks[i].status.valid) 1243 continue; 1244 if (adev->ip_blocks[i].version->funcs->get_clockgating_state) 1245 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags); 1246 } 1247 } 1248 1249 int amdgpu_wait_for_idle(struct amdgpu_device *adev, 1250 enum amd_ip_block_type block_type) 1251 { 1252 int i, r; 1253 1254 for (i = 0; i < adev->num_ip_blocks; i++) { 1255 if (!adev->ip_blocks[i].status.valid) 1256 continue; 1257 if (adev->ip_blocks[i].version->type == block_type) { 1258 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev); 1259 if (r) 1260 return r; 1261 break; 1262 } 1263 } 1264 return 0; 1265 1266 } 1267 1268 bool amdgpu_is_idle(struct amdgpu_device *adev, 1269 enum amd_ip_block_type block_type) 1270 { 1271 int i; 1272 1273 for (i = 0; i < adev->num_ip_blocks; i++) { 1274 if (!adev->ip_blocks[i].status.valid) 1275 continue; 1276 if (adev->ip_blocks[i].version->type == block_type) 1277 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev); 1278 } 1279 return true; 1280 1281 } 1282 1283 struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev, 1284 enum amd_ip_block_type type) 1285 { 1286 int i; 1287 1288 for (i = 0; i < adev->num_ip_blocks; i++) 1289 if (adev->ip_blocks[i].version->type == type) 1290 return &adev->ip_blocks[i]; 1291 1292 return NULL; 1293 } 1294 1295 /** 1296 * amdgpu_ip_block_version_cmp 1297 * 1298 * @adev: amdgpu_device pointer 1299 * @type: enum amd_ip_block_type 1300 * @major: major version 1301 * @minor: minor version 1302 * 1303 * return 0 if equal or greater 1304 * return 1 if smaller or the ip_block doesn't exist 1305 */ 1306 int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, 1307 enum 
amd_ip_block_type type, 1308 u32 major, u32 minor) 1309 { 1310 struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type); 1311 1312 if (ip_block && ((ip_block->version->major > major) || 1313 ((ip_block->version->major == major) && 1314 (ip_block->version->minor >= minor)))) 1315 return 0; 1316 1317 return 1; 1318 } 1319 1320 /** 1321 * amdgpu_ip_block_add 1322 * 1323 * @adev: amdgpu_device pointer 1324 * @ip_block_version: pointer to the IP to add 1325 * 1326 * Adds the IP block driver information to the collection of IPs 1327 * on the asic. 1328 */ 1329 int amdgpu_ip_block_add(struct amdgpu_device *adev, 1330 const struct amdgpu_ip_block_version *ip_block_version) 1331 { 1332 if (!ip_block_version) 1333 return -EINVAL; 1334 1335 DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks, 1336 ip_block_version->funcs->name); 1337 1338 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version; 1339 1340 return 0; 1341 } 1342 1343 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev) 1344 { 1345 adev->enable_virtual_display = false; 1346 1347 if (amdgpu_virtual_display) { 1348 struct drm_device *ddev = adev->ddev; 1349 const char *pci_address_name = pci_name(ddev->pdev); 1350 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname; 1351 1352 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL); 1353 pciaddstr_tmp = pciaddstr; 1354 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) { 1355 pciaddname = strsep(&pciaddname_tmp, ","); 1356 if (!strcmp("all", pciaddname) 1357 || !strcmp(pci_address_name, pciaddname)) { 1358 long num_crtc; 1359 int res = -1; 1360 1361 adev->enable_virtual_display = true; 1362 1363 if (pciaddname_tmp) 1364 res = kstrtol(pciaddname_tmp, 10, 1365 &num_crtc); 1366 1367 if (!res) { 1368 if (num_crtc < 1) 1369 num_crtc = 1; 1370 if (num_crtc > 6) 1371 num_crtc = 6; 1372 adev->mode_info.num_crtc = num_crtc; 1373 } else { 1374 adev->mode_info.num_crtc = 1; 1375 } 1376 break; 1377 } 1378 } 1379 1380 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n", 1381 amdgpu_virtual_display, pci_address_name, 1382 adev->enable_virtual_display, adev->mode_info.num_crtc); 1383 1384 kfree(pciaddstr); 1385 } 1386 } 1387 1388 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) 1389 { 1390 const char *chip_name; 1391 char fw_name[30]; 1392 int err; 1393 const struct gpu_info_firmware_header_v1_0 *hdr; 1394 1395 adev->firmware.gpu_info_fw = NULL; 1396 1397 switch (adev->asic_type) { 1398 case CHIP_TOPAZ: 1399 case CHIP_TONGA: 1400 case CHIP_FIJI: 1401 case CHIP_POLARIS11: 1402 case CHIP_POLARIS10: 1403 case CHIP_POLARIS12: 1404 case CHIP_CARRIZO: 1405 case CHIP_STONEY: 1406 #ifdef CONFIG_DRM_AMDGPU_SI 1407 case CHIP_VERDE: 1408 case CHIP_TAHITI: 1409 case CHIP_PITCAIRN: 1410 case CHIP_OLAND: 1411 case CHIP_HAINAN: 1412 #endif 1413 #ifdef CONFIG_DRM_AMDGPU_CIK 1414 case CHIP_BONAIRE: 1415 case CHIP_HAWAII: 1416 case CHIP_KAVERI: 1417 case CHIP_KABINI: 1418 case CHIP_MULLINS: 1419 #endif 1420 default: 1421 return 0; 1422 case CHIP_VEGA10: 1423 chip_name = "vega10"; 1424 break; 1425 case CHIP_RAVEN: 1426 chip_name = "raven"; 1427 break; 1428 } 1429 1430 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); 1431 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev); 1432 if (err) { 1433 dev_err(adev->dev, 1434 "Failed to load gpu_info firmware \"%s\"\n", 1435 fw_name); 1436 goto out; 1437 } 1438 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw); 1439 if 
(err) { 1440 dev_err(adev->dev, 1441 "Failed to validate gpu_info firmware \"%s\"\n", 1442 fw_name); 1443 goto out; 1444 } 1445 1446 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data; 1447 amdgpu_ucode_print_gpu_info_hdr(&hdr->header); 1448 1449 switch (hdr->version_major) { 1450 case 1: 1451 { 1452 const struct gpu_info_firmware_v1_0 *gpu_info_fw = 1453 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data + 1454 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 1455 1456 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se); 1457 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh); 1458 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se); 1459 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se); 1460 adev->gfx.config.max_texture_channel_caches = 1461 le32_to_cpu(gpu_info_fw->gc_num_tccs); 1462 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs); 1463 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds); 1464 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth); 1465 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth); 1466 adev->gfx.config.double_offchip_lds_buf = 1467 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer); 1468 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size); 1469 adev->gfx.cu_info.max_waves_per_simd = 1470 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd); 1471 adev->gfx.cu_info.max_scratch_slots_per_cu = 1472 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu); 1473 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size); 1474 break; 1475 } 1476 default: 1477 dev_err(adev->dev, 1478 "Unsupported gpu_info table %d\n", hdr->header.ucode_version); 1479 err = -EINVAL; 1480 goto out; 1481 } 1482 out: 1483 return err; 1484 } 1485 1486 static int amdgpu_early_init(struct amdgpu_device *adev) 1487 { 1488 int i, r; 1489 1490 amdgpu_device_enable_virtual_display(adev); 1491 1492 switch (adev->asic_type) { 1493 case CHIP_TOPAZ: 1494 case CHIP_TONGA: 1495 case CHIP_FIJI: 1496 case CHIP_POLARIS11: 1497 case CHIP_POLARIS10: 1498 case CHIP_POLARIS12: 1499 case CHIP_CARRIZO: 1500 case CHIP_STONEY: 1501 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) 1502 adev->family = AMDGPU_FAMILY_CZ; 1503 else 1504 adev->family = AMDGPU_FAMILY_VI; 1505 1506 r = vi_set_ip_blocks(adev); 1507 if (r) 1508 return r; 1509 break; 1510 #ifdef CONFIG_DRM_AMDGPU_SI 1511 case CHIP_VERDE: 1512 case CHIP_TAHITI: 1513 case CHIP_PITCAIRN: 1514 case CHIP_OLAND: 1515 case CHIP_HAINAN: 1516 adev->family = AMDGPU_FAMILY_SI; 1517 r = si_set_ip_blocks(adev); 1518 if (r) 1519 return r; 1520 break; 1521 #endif 1522 #ifdef CONFIG_DRM_AMDGPU_CIK 1523 case CHIP_BONAIRE: 1524 case CHIP_HAWAII: 1525 case CHIP_KAVERI: 1526 case CHIP_KABINI: 1527 case CHIP_MULLINS: 1528 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII)) 1529 adev->family = AMDGPU_FAMILY_CI; 1530 else 1531 adev->family = AMDGPU_FAMILY_KV; 1532 1533 r = cik_set_ip_blocks(adev); 1534 if (r) 1535 return r; 1536 break; 1537 #endif 1538 case CHIP_VEGA10: 1539 case CHIP_RAVEN: 1540 if (adev->asic_type == CHIP_RAVEN) 1541 adev->family = AMDGPU_FAMILY_RV; 1542 else 1543 adev->family = AMDGPU_FAMILY_AI; 1544 1545 r = soc15_set_ip_blocks(adev); 1546 if (r) 1547 return r; 1548 break; 1549 default: 1550 /* FIXME: not supported yet */ 
1551 return -EINVAL; 1552 } 1553 1554 r = amdgpu_device_parse_gpu_info_fw(adev); 1555 if (r) 1556 return r; 1557 1558 if (amdgpu_sriov_vf(adev)) { 1559 r = amdgpu_virt_request_full_gpu(adev, true); 1560 if (r) 1561 return r; 1562 } 1563 1564 for (i = 0; i < adev->num_ip_blocks; i++) { 1565 if ((amdgpu_ip_block_mask & (1 << i)) == 0) { 1566 DRM_ERROR("disabled ip block: %d <%s>\n", 1567 i, adev->ip_blocks[i].version->funcs->name); 1568 adev->ip_blocks[i].status.valid = false; 1569 } else { 1570 if (adev->ip_blocks[i].version->funcs->early_init) { 1571 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev); 1572 if (r == -ENOENT) { 1573 adev->ip_blocks[i].status.valid = false; 1574 } else if (r) { 1575 DRM_ERROR("early_init of IP block <%s> failed %d\n", 1576 adev->ip_blocks[i].version->funcs->name, r); 1577 return r; 1578 } else { 1579 adev->ip_blocks[i].status.valid = true; 1580 } 1581 } else { 1582 adev->ip_blocks[i].status.valid = true; 1583 } 1584 } 1585 } 1586 1587 adev->cg_flags &= amdgpu_cg_mask; 1588 adev->pg_flags &= amdgpu_pg_mask; 1589 1590 return 0; 1591 } 1592 1593 static int amdgpu_init(struct amdgpu_device *adev) 1594 { 1595 int i, r; 1596 1597 for (i = 0; i < adev->num_ip_blocks; i++) { 1598 if (!adev->ip_blocks[i].status.valid) 1599 continue; 1600 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev); 1601 if (r) { 1602 DRM_ERROR("sw_init of IP block <%s> failed %d\n", 1603 adev->ip_blocks[i].version->funcs->name, r); 1604 return r; 1605 } 1606 adev->ip_blocks[i].status.sw = true; 1607 /* need to do gmc hw init early so we can allocate gpu mem */ 1608 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 1609 r = amdgpu_vram_scratch_init(adev); 1610 if (r) { 1611 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); 1612 return r; 1613 } 1614 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); 1615 if (r) { 1616 DRM_ERROR("hw_init %d failed %d\n", i, r); 1617 return r; 1618 } 1619 r = amdgpu_wb_init(adev); 1620 if (r) { 1621 DRM_ERROR("amdgpu_wb_init failed %d\n", r); 1622 return r; 1623 } 1624 adev->ip_blocks[i].status.hw = true; 1625 1626 /* right after GMC hw init, we create CSA */ 1627 if (amdgpu_sriov_vf(adev)) { 1628 r = amdgpu_allocate_static_csa(adev); 1629 if (r) { 1630 DRM_ERROR("allocate CSA failed %d\n", r); 1631 return r; 1632 } 1633 } 1634 } 1635 } 1636 1637 for (i = 0; i < adev->num_ip_blocks; i++) { 1638 if (!adev->ip_blocks[i].status.sw) 1639 continue; 1640 /* gmc hw init is done early */ 1641 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) 1642 continue; 1643 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); 1644 if (r) { 1645 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 1646 adev->ip_blocks[i].version->funcs->name, r); 1647 return r; 1648 } 1649 adev->ip_blocks[i].status.hw = true; 1650 } 1651 1652 return 0; 1653 } 1654 1655 static void amdgpu_fill_reset_magic(struct amdgpu_device *adev) 1656 { 1657 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); 1658 } 1659 1660 static bool amdgpu_check_vram_lost(struct amdgpu_device *adev) 1661 { 1662 return !!memcmp(adev->gart.ptr, adev->reset_magic, 1663 AMDGPU_RESET_MAGIC_NUM); 1664 } 1665 1666 static int amdgpu_late_set_cg_state(struct amdgpu_device *adev) 1667 { 1668 int i = 0, r; 1669 1670 for (i = 0; i < adev->num_ip_blocks; i++) { 1671 if (!adev->ip_blocks[i].status.valid) 1672 continue; 1673 /* skip CG for VCE/UVD, it's handled specially */ 1674 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 1675 
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { 1676 /* enable clockgating to save power */ 1677 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 1678 AMD_CG_STATE_GATE); 1679 if (r) { 1680 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", 1681 adev->ip_blocks[i].version->funcs->name, r); 1682 return r; 1683 } 1684 } 1685 } 1686 return 0; 1687 } 1688 1689 static int amdgpu_late_init(struct amdgpu_device *adev) 1690 { 1691 int i = 0, r; 1692 1693 for (i = 0; i < adev->num_ip_blocks; i++) { 1694 if (!adev->ip_blocks[i].status.valid) 1695 continue; 1696 if (adev->ip_blocks[i].version->funcs->late_init) { 1697 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev); 1698 if (r) { 1699 DRM_ERROR("late_init of IP block <%s> failed %d\n", 1700 adev->ip_blocks[i].version->funcs->name, r); 1701 return r; 1702 } 1703 adev->ip_blocks[i].status.late_initialized = true; 1704 } 1705 } 1706 1707 mod_delayed_work(system_wq, &adev->late_init_work, 1708 msecs_to_jiffies(AMDGPU_RESUME_MS)); 1709 1710 amdgpu_fill_reset_magic(adev); 1711 1712 return 0; 1713 } 1714 1715 static int amdgpu_fini(struct amdgpu_device *adev) 1716 { 1717 int i, r; 1718 1719 /* need to disable SMC first */ 1720 for (i = 0; i < adev->num_ip_blocks; i++) { 1721 if (!adev->ip_blocks[i].status.hw) 1722 continue; 1723 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 1724 /* ungate blocks before hw fini so that we can shutdown the blocks safely */ 1725 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 1726 AMD_CG_STATE_UNGATE); 1727 if (r) { 1728 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", 1729 adev->ip_blocks[i].version->funcs->name, r); 1730 return r; 1731 } 1732 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); 1733 /* XXX handle errors */ 1734 if (r) { 1735 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", 1736 adev->ip_blocks[i].version->funcs->name, r); 1737 } 1738 adev->ip_blocks[i].status.hw = false; 1739 break; 1740 } 1741 } 1742 1743 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1744 if (!adev->ip_blocks[i].status.hw) 1745 continue; 1746 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 1747 amdgpu_wb_fini(adev); 1748 amdgpu_vram_scratch_fini(adev); 1749 } 1750 1751 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 1752 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { 1753 /* ungate blocks before hw fini so that we can shutdown the blocks safely */ 1754 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 1755 AMD_CG_STATE_UNGATE); 1756 if (r) { 1757 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", 1758 adev->ip_blocks[i].version->funcs->name, r); 1759 return r; 1760 } 1761 } 1762 1763 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); 1764 /* XXX handle errors */ 1765 if (r) { 1766 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", 1767 adev->ip_blocks[i].version->funcs->name, r); 1768 } 1769 1770 adev->ip_blocks[i].status.hw = false; 1771 } 1772 1773 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1774 if (!adev->ip_blocks[i].status.sw) 1775 continue; 1776 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); 1777 /* XXX handle errors */ 1778 if (r) { 1779 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", 1780 adev->ip_blocks[i].version->funcs->name, r); 1781 } 1782 adev->ip_blocks[i].status.sw = false; 1783 adev->ip_blocks[i].status.valid = false; 1784 } 1785 1786 for (i = 
adev->num_ip_blocks - 1; i >= 0; i--) { 1787 if (!adev->ip_blocks[i].status.late_initialized) 1788 continue; 1789 if (adev->ip_blocks[i].version->funcs->late_fini) 1790 adev->ip_blocks[i].version->funcs->late_fini((void *)adev); 1791 adev->ip_blocks[i].status.late_initialized = false; 1792 } 1793 1794 if (amdgpu_sriov_vf(adev)) 1795 amdgpu_virt_release_full_gpu(adev, false); 1796 1797 return 0; 1798 } 1799 1800 static void amdgpu_late_init_func_handler(struct work_struct *work) 1801 { 1802 struct amdgpu_device *adev = 1803 container_of(work, struct amdgpu_device, late_init_work.work); 1804 amdgpu_late_set_cg_state(adev); 1805 } 1806 1807 int amdgpu_suspend(struct amdgpu_device *adev) 1808 { 1809 int i, r; 1810 1811 if (amdgpu_sriov_vf(adev)) 1812 amdgpu_virt_request_full_gpu(adev, false); 1813 1814 /* ungate SMC block first */ 1815 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC, 1816 AMD_CG_STATE_UNGATE); 1817 if (r) { 1818 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r); 1819 } 1820 1821 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1822 if (!adev->ip_blocks[i].status.valid) 1823 continue; 1824 /* ungate blocks so that suspend can properly shut them down */ 1825 if (i != AMD_IP_BLOCK_TYPE_SMC) { 1826 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 1827 AMD_CG_STATE_UNGATE); 1828 if (r) { 1829 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", 1830 adev->ip_blocks[i].version->funcs->name, r); 1831 } 1832 } 1833 /* XXX handle errors */ 1834 r = adev->ip_blocks[i].version->funcs->suspend(adev); 1835 /* XXX handle errors */ 1836 if (r) { 1837 DRM_ERROR("suspend of IP block <%s> failed %d\n", 1838 adev->ip_blocks[i].version->funcs->name, r); 1839 } 1840 } 1841 1842 if (amdgpu_sriov_vf(adev)) 1843 amdgpu_virt_release_full_gpu(adev, false); 1844 1845 return 0; 1846 } 1847 1848 static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev) 1849 { 1850 int i, r; 1851 1852 static enum amd_ip_block_type ip_order[] = { 1853 AMD_IP_BLOCK_TYPE_GMC, 1854 AMD_IP_BLOCK_TYPE_COMMON, 1855 AMD_IP_BLOCK_TYPE_IH, 1856 }; 1857 1858 for (i = 0; i < ARRAY_SIZE(ip_order); i++) { 1859 int j; 1860 struct amdgpu_ip_block *block; 1861 1862 for (j = 0; j < adev->num_ip_blocks; j++) { 1863 block = &adev->ip_blocks[j]; 1864 1865 if (block->version->type != ip_order[i] || 1866 !block->status.valid) 1867 continue; 1868 1869 r = block->version->funcs->hw_init(adev); 1870 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed"); 1871 } 1872 } 1873 1874 return 0; 1875 } 1876 1877 static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev) 1878 { 1879 int i, r; 1880 1881 static enum amd_ip_block_type ip_order[] = { 1882 AMD_IP_BLOCK_TYPE_SMC, 1883 AMD_IP_BLOCK_TYPE_DCE, 1884 AMD_IP_BLOCK_TYPE_GFX, 1885 AMD_IP_BLOCK_TYPE_SDMA, 1886 AMD_IP_BLOCK_TYPE_UVD, 1887 AMD_IP_BLOCK_TYPE_VCE 1888 }; 1889 1890 for (i = 0; i < ARRAY_SIZE(ip_order); i++) { 1891 int j; 1892 struct amdgpu_ip_block *block; 1893 1894 for (j = 0; j < adev->num_ip_blocks; j++) { 1895 block = &adev->ip_blocks[j]; 1896 1897 if (block->version->type != ip_order[i] || 1898 !block->status.valid) 1899 continue; 1900 1901 r = block->version->funcs->hw_init(adev); 1902 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed"); 1903 } 1904 } 1905 1906 return 0; 1907 } 1908 1909 static int amdgpu_resume_phase1(struct amdgpu_device *adev) 1910 { 1911 int i, r; 1912 1913 for (i = 0; i < adev->num_ip_blocks; i++) { 1914 if 
(!adev->ip_blocks[i].status.valid)
1915 continue;
1916 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1917 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1918 adev->ip_blocks[i].version->type ==
1919 AMD_IP_BLOCK_TYPE_IH) {
1920 r = adev->ip_blocks[i].version->funcs->resume(adev);
1921 if (r) {
1922 DRM_ERROR("resume of IP block <%s> failed %d\n",
1923 adev->ip_blocks[i].version->funcs->name, r);
1924 return r;
1925 }
1926 }
1927 }
1928
1929 return 0;
1930 }
1931
1932 static int amdgpu_resume_phase2(struct amdgpu_device *adev)
1933 {
1934 int i, r;
1935
1936 for (i = 0; i < adev->num_ip_blocks; i++) {
1937 if (!adev->ip_blocks[i].status.valid)
1938 continue;
1939 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1940 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1941 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
1942 continue;
1943 r = adev->ip_blocks[i].version->funcs->resume(adev);
1944 if (r) {
1945 DRM_ERROR("resume of IP block <%s> failed %d\n",
1946 adev->ip_blocks[i].version->funcs->name, r);
1947 return r;
1948 }
1949 }
1950
1951 return 0;
1952 }
1953
1954 static int amdgpu_resume(struct amdgpu_device *adev)
1955 {
1956 int r;
1957
1958 r = amdgpu_resume_phase1(adev);
1959 if (r)
1960 return r;
1961 r = amdgpu_resume_phase2(adev);
1962
1963 return r;
1964 }
1965
1966 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
1967 {
1968 if (adev->is_atom_fw) {
1969 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
1970 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1971 } else {
1972 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1973 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1974 }
1975 }
1976
1977 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
1978 {
1979 switch (asic_type) {
1980 #if defined(CONFIG_DRM_AMD_DC)
1981 case CHIP_BONAIRE:
1982 case CHIP_HAWAII:
1983 case CHIP_CARRIZO:
1984 case CHIP_STONEY:
1985 case CHIP_POLARIS11:
1986 case CHIP_POLARIS10:
1987 case CHIP_TONGA:
1988 case CHIP_FIJI:
1989 #if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
1990 return amdgpu_dc != 0;
1991 #else
1992 return amdgpu_dc > 0;
1993 #endif
1994 #endif
1995 default:
1996 return false;
1997 }
1998 }
1999
2000 /**
2001 * amdgpu_device_has_dc_support - check if dc is supported
2002 *
2003 * @adev: amdgpu_device pointer
2004 *
2005 * Returns true for supported, false for not supported
2006 */
2007 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2008 {
2009 if (amdgpu_sriov_vf(adev))
2010 return false;
2011
2012 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2013 }
2014
2015 /**
2016 * amdgpu_device_init - initialize the driver
2017 *
2018 * @adev: amdgpu_device pointer
2019 * @ddev: drm dev pointer
2020 * @pdev: pci dev pointer
2021 * @flags: driver flags
2022 *
2023 * Initializes the driver info and hw (all asics).
2024 * Returns 0 for success or an error on failure.
2025 * Called at driver startup.
2026 */ 2027 int amdgpu_device_init(struct amdgpu_device *adev, 2028 struct drm_device *ddev, 2029 struct pci_dev *pdev, 2030 uint32_t flags) 2031 { 2032 int r, i; 2033 bool runtime = false; 2034 u32 max_MBps; 2035 2036 adev->shutdown = false; 2037 adev->dev = &pdev->dev; 2038 adev->ddev = ddev; 2039 adev->pdev = pdev; 2040 adev->flags = flags; 2041 adev->asic_type = flags & AMD_ASIC_MASK; 2042 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; 2043 adev->mc.gart_size = 512 * 1024 * 1024; 2044 adev->accel_working = false; 2045 adev->num_rings = 0; 2046 adev->mman.buffer_funcs = NULL; 2047 adev->mman.buffer_funcs_ring = NULL; 2048 adev->vm_manager.vm_pte_funcs = NULL; 2049 adev->vm_manager.vm_pte_num_rings = 0; 2050 adev->gart.gart_funcs = NULL; 2051 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); 2052 2053 adev->smc_rreg = &amdgpu_invalid_rreg; 2054 adev->smc_wreg = &amdgpu_invalid_wreg; 2055 adev->pcie_rreg = &amdgpu_invalid_rreg; 2056 adev->pcie_wreg = &amdgpu_invalid_wreg; 2057 adev->pciep_rreg = &amdgpu_invalid_rreg; 2058 adev->pciep_wreg = &amdgpu_invalid_wreg; 2059 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; 2060 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; 2061 adev->didt_rreg = &amdgpu_invalid_rreg; 2062 adev->didt_wreg = &amdgpu_invalid_wreg; 2063 adev->gc_cac_rreg = &amdgpu_invalid_rreg; 2064 adev->gc_cac_wreg = &amdgpu_invalid_wreg; 2065 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; 2066 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; 2067 2068 2069 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 2070 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, 2071 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 2072 2073 /* mutex initialization are all done here so we 2074 * can recall function without having locking issues */ 2075 atomic_set(&adev->irq.ih.lock, 0); 2076 mutex_init(&adev->firmware.mutex); 2077 mutex_init(&adev->pm.mutex); 2078 mutex_init(&adev->gfx.gpu_clock_mutex); 2079 mutex_init(&adev->srbm_mutex); 2080 mutex_init(&adev->grbm_idx_mutex); 2081 mutex_init(&adev->mn_lock); 2082 hash_init(adev->mn_hash); 2083 2084 amdgpu_check_arguments(adev); 2085 2086 spin_lock_init(&adev->mmio_idx_lock); 2087 spin_lock_init(&adev->smc_idx_lock); 2088 spin_lock_init(&adev->pcie_idx_lock); 2089 spin_lock_init(&adev->uvd_ctx_idx_lock); 2090 spin_lock_init(&adev->didt_idx_lock); 2091 spin_lock_init(&adev->gc_cac_idx_lock); 2092 spin_lock_init(&adev->se_cac_idx_lock); 2093 spin_lock_init(&adev->audio_endpt_idx_lock); 2094 spin_lock_init(&adev->mm_stats.lock); 2095 2096 INIT_LIST_HEAD(&adev->shadow_list); 2097 mutex_init(&adev->shadow_list_lock); 2098 2099 INIT_LIST_HEAD(&adev->gtt_list); 2100 spin_lock_init(&adev->gtt_list_lock); 2101 2102 INIT_LIST_HEAD(&adev->ring_lru_list); 2103 spin_lock_init(&adev->ring_lru_list_lock); 2104 2105 INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler); 2106 2107 /* Registers mapping */ 2108 /* TODO: block userspace mapping of io register */ 2109 if (adev->asic_type >= CHIP_BONAIRE) { 2110 adev->rmmio_base = pci_resource_start(adev->pdev, 5); 2111 adev->rmmio_size = pci_resource_len(adev->pdev, 5); 2112 } else { 2113 adev->rmmio_base = pci_resource_start(adev->pdev, 2); 2114 adev->rmmio_size = pci_resource_len(adev->pdev, 2); 2115 } 2116 2117 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); 2118 if (adev->rmmio == NULL) { 2119 return -ENOMEM; 2120 } 2121 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); 2122 
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); 2123 2124 /* doorbell bar mapping */ 2125 amdgpu_doorbell_init(adev); 2126 2127 /* io port mapping */ 2128 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 2129 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) { 2130 adev->rio_mem_size = pci_resource_len(adev->pdev, i); 2131 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size); 2132 break; 2133 } 2134 } 2135 if (adev->rio_mem == NULL) 2136 DRM_INFO("PCI I/O BAR is not found.\n"); 2137 2138 /* early init functions */ 2139 r = amdgpu_early_init(adev); 2140 if (r) 2141 return r; 2142 2143 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ 2144 /* this will fail for cards that aren't VGA class devices, just 2145 * ignore it */ 2146 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode); 2147 2148 if (amdgpu_runtime_pm == 1) 2149 runtime = true; 2150 if (amdgpu_device_is_px(ddev)) 2151 runtime = true; 2152 if (!pci_is_thunderbolt_attached(adev->pdev)) 2153 vga_switcheroo_register_client(adev->pdev, 2154 &amdgpu_switcheroo_ops, runtime); 2155 if (runtime) 2156 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); 2157 2158 /* Read BIOS */ 2159 if (!amdgpu_get_bios(adev)) { 2160 r = -EINVAL; 2161 goto failed; 2162 } 2163 2164 r = amdgpu_atombios_init(adev); 2165 if (r) { 2166 dev_err(adev->dev, "amdgpu_atombios_init failed\n"); 2167 amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); 2168 goto failed; 2169 } 2170 2171 /* detect if we are with an SRIOV vbios */ 2172 amdgpu_device_detect_sriov_bios(adev); 2173 2174 /* Post card if necessary */ 2175 if (amdgpu_vpost_needed(adev)) { 2176 if (!adev->bios) { 2177 dev_err(adev->dev, "no vBIOS found\n"); 2178 amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0); 2179 r = -EINVAL; 2180 goto failed; 2181 } 2182 DRM_INFO("GPU posting now...\n"); 2183 r = amdgpu_atom_asic_init(adev->mode_info.atom_context); 2184 if (r) { 2185 dev_err(adev->dev, "gpu post error!\n"); 2186 amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0); 2187 goto failed; 2188 } 2189 } else { 2190 DRM_INFO("GPU post is not needed\n"); 2191 } 2192 2193 if (adev->is_atom_fw) { 2194 /* Initialize clocks */ 2195 r = amdgpu_atomfirmware_get_clock_info(adev); 2196 if (r) { 2197 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); 2198 amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 2199 goto failed; 2200 } 2201 } else { 2202 /* Initialize clocks */ 2203 r = amdgpu_atombios_get_clock_info(adev); 2204 if (r) { 2205 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); 2206 amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 2207 goto failed; 2208 } 2209 /* init i2c buses */ 2210 if (!amdgpu_device_has_dc_support(adev)) 2211 amdgpu_atombios_i2c_init(adev); 2212 } 2213 2214 /* Fence driver */ 2215 r = amdgpu_fence_driver_init(adev); 2216 if (r) { 2217 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n"); 2218 amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); 2219 goto failed; 2220 } 2221 2222 /* init the mode config */ 2223 drm_mode_config_init(adev->ddev); 2224 2225 r = amdgpu_init(adev); 2226 if (r) { 2227 dev_err(adev->dev, "amdgpu_init failed\n"); 2228 amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); 2229 amdgpu_fini(adev); 2230 goto failed; 2231 } 2232 2233 adev->accel_working = true; 2234 2235 amdgpu_vm_check_compute_bug(adev); 2236 2237 /* Initialize the buffer migration limit. 
*/ 2238 if (amdgpu_moverate >= 0) 2239 max_MBps = amdgpu_moverate; 2240 else 2241 max_MBps = 8; /* Allow 8 MB/s. */ 2242 /* Get a log2 for easy divisions. */ 2243 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); 2244 2245 r = amdgpu_ib_pool_init(adev); 2246 if (r) { 2247 dev_err(adev->dev, "IB initialization failed (%d).\n", r); 2248 amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r); 2249 goto failed; 2250 } 2251 2252 r = amdgpu_ib_ring_tests(adev); 2253 if (r) 2254 DRM_ERROR("ib ring test failed (%d).\n", r); 2255 2256 amdgpu_fbdev_init(adev); 2257 2258 r = amdgpu_gem_debugfs_init(adev); 2259 if (r) 2260 DRM_ERROR("registering gem debugfs failed (%d).\n", r); 2261 2262 r = amdgpu_debugfs_regs_init(adev); 2263 if (r) 2264 DRM_ERROR("registering register debugfs failed (%d).\n", r); 2265 2266 r = amdgpu_debugfs_test_ib_ring_init(adev); 2267 if (r) 2268 DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r); 2269 2270 r = amdgpu_debugfs_firmware_init(adev); 2271 if (r) 2272 DRM_ERROR("registering firmware debugfs failed (%d).\n", r); 2273 2274 r = amdgpu_debugfs_vbios_dump_init(adev); 2275 if (r) 2276 DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r); 2277 2278 if ((amdgpu_testing & 1)) { 2279 if (adev->accel_working) 2280 amdgpu_test_moves(adev); 2281 else 2282 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n"); 2283 } 2284 if (amdgpu_benchmarking) { 2285 if (adev->accel_working) 2286 amdgpu_benchmark(adev, amdgpu_benchmarking); 2287 else 2288 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n"); 2289 } 2290 2291 /* enable clockgating, etc. after ib tests, etc. since some blocks require 2292 * explicit gating rather than handling it automatically. 2293 */ 2294 r = amdgpu_late_init(adev); 2295 if (r) { 2296 dev_err(adev->dev, "amdgpu_late_init failed\n"); 2297 amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); 2298 goto failed; 2299 } 2300 2301 return 0; 2302 2303 failed: 2304 amdgpu_vf_error_trans_all(adev); 2305 if (runtime) 2306 vga_switcheroo_fini_domain_pm_ops(adev->dev); 2307 return r; 2308 } 2309 2310 /** 2311 * amdgpu_device_fini - tear down the driver 2312 * 2313 * @adev: amdgpu_device pointer 2314 * 2315 * Tear down the driver info (all asics). 2316 * Called at driver shutdown. 
2317 */ 2318 void amdgpu_device_fini(struct amdgpu_device *adev) 2319 { 2320 int r; 2321 2322 DRM_INFO("amdgpu: finishing device.\n"); 2323 adev->shutdown = true; 2324 if (adev->mode_info.mode_config_initialized) 2325 drm_crtc_force_disable_all(adev->ddev); 2326 /* evict vram memory */ 2327 amdgpu_bo_evict_vram(adev); 2328 amdgpu_ib_pool_fini(adev); 2329 amdgpu_fence_driver_fini(adev); 2330 amdgpu_fbdev_fini(adev); 2331 r = amdgpu_fini(adev); 2332 if (adev->firmware.gpu_info_fw) { 2333 release_firmware(adev->firmware.gpu_info_fw); 2334 adev->firmware.gpu_info_fw = NULL; 2335 } 2336 adev->accel_working = false; 2337 cancel_delayed_work_sync(&adev->late_init_work); 2338 /* free i2c buses */ 2339 if (!amdgpu_device_has_dc_support(adev)) 2340 amdgpu_i2c_fini(adev); 2341 amdgpu_atombios_fini(adev); 2342 kfree(adev->bios); 2343 adev->bios = NULL; 2344 if (!pci_is_thunderbolt_attached(adev->pdev)) 2345 vga_switcheroo_unregister_client(adev->pdev); 2346 if (adev->flags & AMD_IS_PX) 2347 vga_switcheroo_fini_domain_pm_ops(adev->dev); 2348 vga_client_register(adev->pdev, NULL, NULL, NULL); 2349 if (adev->rio_mem) 2350 pci_iounmap(adev->pdev, adev->rio_mem); 2351 adev->rio_mem = NULL; 2352 iounmap(adev->rmmio); 2353 adev->rmmio = NULL; 2354 amdgpu_doorbell_fini(adev); 2355 amdgpu_debugfs_regs_cleanup(adev); 2356 } 2357 2358 2359 /* 2360 * Suspend & resume. 2361 */ 2362 /** 2363 * amdgpu_device_suspend - initiate device suspend 2364 * 2365 * @pdev: drm dev pointer 2366 * @state: suspend state 2367 * 2368 * Puts the hw in the suspend state (all asics). 2369 * Returns 0 for success or an error on failure. 2370 * Called at driver suspend. 2371 */ 2372 int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) 2373 { 2374 struct amdgpu_device *adev; 2375 struct drm_crtc *crtc; 2376 struct drm_connector *connector; 2377 int r; 2378 2379 if (dev == NULL || dev->dev_private == NULL) { 2380 return -ENODEV; 2381 } 2382 2383 adev = dev->dev_private; 2384 2385 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 2386 return 0; 2387 2388 drm_kms_helper_poll_disable(dev); 2389 2390 if (!amdgpu_device_has_dc_support(adev)) { 2391 /* turn off display hw */ 2392 drm_modeset_lock_all(dev); 2393 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 2394 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 2395 } 2396 drm_modeset_unlock_all(dev); 2397 } 2398 2399 amdgpu_amdkfd_suspend(adev); 2400 2401 /* unpin the front buffers and cursors */ 2402 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2403 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2404 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb); 2405 struct amdgpu_bo *robj; 2406 2407 if (amdgpu_crtc->cursor_bo) { 2408 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2409 r = amdgpu_bo_reserve(aobj, true); 2410 if (r == 0) { 2411 amdgpu_bo_unpin(aobj); 2412 amdgpu_bo_unreserve(aobj); 2413 } 2414 } 2415 2416 if (rfb == NULL || rfb->obj == NULL) { 2417 continue; 2418 } 2419 robj = gem_to_amdgpu_bo(rfb->obj); 2420 /* don't unpin kernel fb objects */ 2421 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { 2422 r = amdgpu_bo_reserve(robj, true); 2423 if (r == 0) { 2424 amdgpu_bo_unpin(robj); 2425 amdgpu_bo_unreserve(robj); 2426 } 2427 } 2428 } 2429 /* evict vram memory */ 2430 amdgpu_bo_evict_vram(adev); 2431 2432 amdgpu_fence_driver_suspend(adev); 2433 2434 r = amdgpu_suspend(adev); 2435 2436 /* evict remaining vram memory 2437 * This second call to evict vram is to evict 
the gart page table 2438 * using the CPU. 2439 */ 2440 amdgpu_bo_evict_vram(adev); 2441 2442 amdgpu_atombios_scratch_regs_save(adev); 2443 pci_save_state(dev->pdev); 2444 if (suspend) { 2445 /* Shut down the device */ 2446 pci_disable_device(dev->pdev); 2447 pci_set_power_state(dev->pdev, PCI_D3hot); 2448 } else { 2449 r = amdgpu_asic_reset(adev); 2450 if (r) 2451 DRM_ERROR("amdgpu asic reset failed\n"); 2452 } 2453 2454 if (fbcon) { 2455 console_lock(); 2456 amdgpu_fbdev_set_suspend(adev, 1); 2457 console_unlock(); 2458 } 2459 return 0; 2460 } 2461 2462 /** 2463 * amdgpu_device_resume - initiate device resume 2464 * 2465 * @pdev: drm dev pointer 2466 * 2467 * Bring the hw back to operating state (all asics). 2468 * Returns 0 for success or an error on failure. 2469 * Called at driver resume. 2470 */ 2471 int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) 2472 { 2473 struct drm_connector *connector; 2474 struct amdgpu_device *adev = dev->dev_private; 2475 struct drm_crtc *crtc; 2476 int r = 0; 2477 2478 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 2479 return 0; 2480 2481 if (fbcon) 2482 console_lock(); 2483 2484 if (resume) { 2485 pci_set_power_state(dev->pdev, PCI_D0); 2486 pci_restore_state(dev->pdev); 2487 r = pci_enable_device(dev->pdev); 2488 if (r) 2489 goto unlock; 2490 } 2491 amdgpu_atombios_scratch_regs_restore(adev); 2492 2493 /* post card */ 2494 if (amdgpu_need_post(adev)) { 2495 r = amdgpu_atom_asic_init(adev->mode_info.atom_context); 2496 if (r) 2497 DRM_ERROR("amdgpu asic init failed\n"); 2498 } 2499 2500 r = amdgpu_resume(adev); 2501 if (r) { 2502 DRM_ERROR("amdgpu_resume failed (%d).\n", r); 2503 goto unlock; 2504 } 2505 amdgpu_fence_driver_resume(adev); 2506 2507 if (resume) { 2508 r = amdgpu_ib_ring_tests(adev); 2509 if (r) 2510 DRM_ERROR("ib ring test failed (%d).\n", r); 2511 } 2512 2513 r = amdgpu_late_init(adev); 2514 if (r) 2515 goto unlock; 2516 2517 /* pin cursors */ 2518 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2519 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2520 2521 if (amdgpu_crtc->cursor_bo) { 2522 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2523 r = amdgpu_bo_reserve(aobj, true); 2524 if (r == 0) { 2525 r = amdgpu_bo_pin(aobj, 2526 AMDGPU_GEM_DOMAIN_VRAM, 2527 &amdgpu_crtc->cursor_addr); 2528 if (r != 0) 2529 DRM_ERROR("Failed to pin cursor BO (%d)\n", r); 2530 amdgpu_bo_unreserve(aobj); 2531 } 2532 } 2533 } 2534 r = amdgpu_amdkfd_resume(adev); 2535 if (r) 2536 return r; 2537 2538 /* blat the mode back in */ 2539 if (fbcon) { 2540 if (!amdgpu_device_has_dc_support(adev)) { 2541 /* pre DCE11 */ 2542 drm_helper_resume_force_mode(dev); 2543 2544 /* turn on display hw */ 2545 drm_modeset_lock_all(dev); 2546 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 2547 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 2548 } 2549 drm_modeset_unlock_all(dev); 2550 } else { 2551 /* 2552 * There is no equivalent atomic helper to turn on 2553 * display, so we defined our own function for this, 2554 * once suspend resume is supported by the atomic 2555 * framework this will be reworked 2556 */ 2557 amdgpu_dm_display_resume(adev); 2558 } 2559 } 2560 2561 drm_kms_helper_poll_enable(dev); 2562 2563 /* 2564 * Most of the connector probing functions try to acquire runtime pm 2565 * refs to ensure that the GPU is powered on when connector polling is 2566 * performed. 
Since we're calling this from a runtime PM callback, 2567 * trying to acquire rpm refs will cause us to deadlock. 2568 * 2569 * Since we're guaranteed to be holding the rpm lock, it's safe to 2570 * temporarily disable the rpm helpers so this doesn't deadlock us. 2571 */ 2572 #ifdef CONFIG_PM 2573 dev->dev->power.disable_depth++; 2574 #endif 2575 if (!amdgpu_device_has_dc_support(adev)) 2576 drm_helper_hpd_irq_event(dev); 2577 else 2578 drm_kms_helper_hotplug_event(dev); 2579 #ifdef CONFIG_PM 2580 dev->dev->power.disable_depth--; 2581 #endif 2582 2583 if (fbcon) 2584 amdgpu_fbdev_set_suspend(adev, 0); 2585 2586 unlock: 2587 if (fbcon) 2588 console_unlock(); 2589 2590 return r; 2591 } 2592 2593 static bool amdgpu_check_soft_reset(struct amdgpu_device *adev) 2594 { 2595 int i; 2596 bool asic_hang = false; 2597 2598 for (i = 0; i < adev->num_ip_blocks; i++) { 2599 if (!adev->ip_blocks[i].status.valid) 2600 continue; 2601 if (adev->ip_blocks[i].version->funcs->check_soft_reset) 2602 adev->ip_blocks[i].status.hang = 2603 adev->ip_blocks[i].version->funcs->check_soft_reset(adev); 2604 if (adev->ip_blocks[i].status.hang) { 2605 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); 2606 asic_hang = true; 2607 } 2608 } 2609 return asic_hang; 2610 } 2611 2612 static int amdgpu_pre_soft_reset(struct amdgpu_device *adev) 2613 { 2614 int i, r = 0; 2615 2616 for (i = 0; i < adev->num_ip_blocks; i++) { 2617 if (!adev->ip_blocks[i].status.valid) 2618 continue; 2619 if (adev->ip_blocks[i].status.hang && 2620 adev->ip_blocks[i].version->funcs->pre_soft_reset) { 2621 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); 2622 if (r) 2623 return r; 2624 } 2625 } 2626 2627 return 0; 2628 } 2629 2630 static bool amdgpu_need_full_reset(struct amdgpu_device *adev) 2631 { 2632 int i; 2633 2634 for (i = 0; i < adev->num_ip_blocks; i++) { 2635 if (!adev->ip_blocks[i].status.valid) 2636 continue; 2637 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || 2638 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || 2639 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || 2640 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || 2641 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 2642 if (adev->ip_blocks[i].status.hang) { 2643 DRM_INFO("Some block need full reset!\n"); 2644 return true; 2645 } 2646 } 2647 } 2648 return false; 2649 } 2650 2651 static int amdgpu_soft_reset(struct amdgpu_device *adev) 2652 { 2653 int i, r = 0; 2654 2655 for (i = 0; i < adev->num_ip_blocks; i++) { 2656 if (!adev->ip_blocks[i].status.valid) 2657 continue; 2658 if (adev->ip_blocks[i].status.hang && 2659 adev->ip_blocks[i].version->funcs->soft_reset) { 2660 r = adev->ip_blocks[i].version->funcs->soft_reset(adev); 2661 if (r) 2662 return r; 2663 } 2664 } 2665 2666 return 0; 2667 } 2668 2669 static int amdgpu_post_soft_reset(struct amdgpu_device *adev) 2670 { 2671 int i, r = 0; 2672 2673 for (i = 0; i < adev->num_ip_blocks; i++) { 2674 if (!adev->ip_blocks[i].status.valid) 2675 continue; 2676 if (adev->ip_blocks[i].status.hang && 2677 adev->ip_blocks[i].version->funcs->post_soft_reset) 2678 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); 2679 if (r) 2680 return r; 2681 } 2682 2683 return 0; 2684 } 2685 2686 bool amdgpu_need_backup(struct amdgpu_device *adev) 2687 { 2688 if (adev->flags & AMD_IS_APU) 2689 return false; 2690 2691 return amdgpu_lockup_timeout > 0 ? 
true : false; 2692 } 2693 2694 static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, 2695 struct amdgpu_ring *ring, 2696 struct amdgpu_bo *bo, 2697 struct dma_fence **fence) 2698 { 2699 uint32_t domain; 2700 int r; 2701 2702 if (!bo->shadow) 2703 return 0; 2704 2705 r = amdgpu_bo_reserve(bo, true); 2706 if (r) 2707 return r; 2708 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); 2709 /* if bo has been evicted, then no need to recover */ 2710 if (domain == AMDGPU_GEM_DOMAIN_VRAM) { 2711 r = amdgpu_bo_validate(bo->shadow); 2712 if (r) { 2713 DRM_ERROR("bo validate failed!\n"); 2714 goto err; 2715 } 2716 2717 r = amdgpu_bo_restore_from_shadow(adev, ring, bo, 2718 NULL, fence, true); 2719 if (r) { 2720 DRM_ERROR("recover page table failed!\n"); 2721 goto err; 2722 } 2723 } 2724 err: 2725 amdgpu_bo_unreserve(bo); 2726 return r; 2727 } 2728 2729 /** 2730 * amdgpu_sriov_gpu_reset - reset the asic 2731 * 2732 * @adev: amdgpu device pointer 2733 * @job: which job trigger hang 2734 * 2735 * Attempt the reset the GPU if it has hung (all asics). 2736 * for SRIOV case. 2737 * Returns 0 for success or an error on failure. 2738 */ 2739 int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job) 2740 { 2741 int i, j, r = 0; 2742 int resched; 2743 struct amdgpu_bo *bo, *tmp; 2744 struct amdgpu_ring *ring; 2745 struct dma_fence *fence = NULL, *next = NULL; 2746 2747 mutex_lock(&adev->virt.lock_reset); 2748 atomic_inc(&adev->gpu_reset_counter); 2749 adev->in_sriov_reset = true; 2750 2751 /* block TTM */ 2752 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); 2753 2754 /* we start from the ring trigger GPU hang */ 2755 j = job ? job->ring->idx : 0; 2756 2757 /* block scheduler */ 2758 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) { 2759 ring = adev->rings[i % AMDGPU_MAX_RINGS]; 2760 if (!ring || !ring->sched.thread) 2761 continue; 2762 2763 kthread_park(ring->sched.thread); 2764 2765 if (job && j != i) 2766 continue; 2767 2768 /* here give the last chance to check if job removed from mirror-list 2769 * since we already pay some time on kthread_park */ 2770 if (job && list_empty(&job->base.node)) { 2771 kthread_unpark(ring->sched.thread); 2772 goto give_up_reset; 2773 } 2774 2775 if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit)) 2776 amd_sched_job_kickout(&job->base); 2777 2778 /* only do job_reset on the hang ring if @job not NULL */ 2779 amd_sched_hw_job_reset(&ring->sched); 2780 2781 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ 2782 amdgpu_fence_driver_force_completion_ring(ring); 2783 } 2784 2785 /* request to take full control of GPU before re-initialization */ 2786 if (job) 2787 amdgpu_virt_reset_gpu(adev); 2788 else 2789 amdgpu_virt_request_full_gpu(adev, true); 2790 2791 2792 /* Resume IP prior to SMC */ 2793 amdgpu_sriov_reinit_early(adev); 2794 2795 /* we need recover gart prior to run SMC/CP/SDMA resume */ 2796 amdgpu_ttm_recover_gart(adev); 2797 2798 /* now we are okay to resume SMC/CP/SDMA */ 2799 amdgpu_sriov_reinit_late(adev); 2800 2801 amdgpu_irq_gpu_reset_resume_helper(adev); 2802 2803 if (amdgpu_ib_ring_tests(adev)) 2804 dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r); 2805 2806 /* release full control of GPU after ib test */ 2807 amdgpu_virt_release_full_gpu(adev, true); 2808 2809 DRM_INFO("recover vram bo from shadow\n"); 2810 2811 ring = adev->mman.buffer_funcs_ring; 2812 mutex_lock(&adev->shadow_list_lock); 2813 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, 
shadow_list) {
2814 next = NULL;
2815 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2816 if (fence) {
2817 r = dma_fence_wait(fence, false);
2818 if (r) {
2819 WARN(r, "recovery from shadow isn't completed\n");
2820 break;
2821 }
2822 }
2823
2824 dma_fence_put(fence);
2825 fence = next;
2826 }
2827 mutex_unlock(&adev->shadow_list_lock);
2828
2829 if (fence) {
2830 r = dma_fence_wait(fence, false);
2831 if (r)
2832 WARN(r, "recovery from shadow isn't completed\n");
2833 }
2834 dma_fence_put(fence);
2835
2836 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2837 ring = adev->rings[i % AMDGPU_MAX_RINGS];
2838 if (!ring || !ring->sched.thread)
2839 continue;
2840
2841 if (job && j != i) {
2842 kthread_unpark(ring->sched.thread);
2843 continue;
2844 }
2845
2846 amd_sched_job_recovery(&ring->sched);
2847 kthread_unpark(ring->sched.thread);
2848 }
2849
2850 drm_helper_resume_force_mode(adev->ddev);
2851 give_up_reset:
2852 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2853 if (r) {
2854 /* bad news, how to tell it to userspace? */
2855 dev_info(adev->dev, "GPU reset failed\n");
2856 } else {
2857 dev_info(adev->dev, "GPU reset succeeded!\n");
2858 }
2859
2860 adev->in_sriov_reset = false;
2861 mutex_unlock(&adev->virt.lock_reset);
2862 return r;
2863 }
2864
2865 /**
2866 * amdgpu_gpu_reset - reset the asic
2867 *
2868 * @adev: amdgpu device pointer
2869 *
2870 * Attempt to reset the GPU if it has hung (all asics).
2871 * Returns 0 for success or an error on failure.
2872 */
2873 int amdgpu_gpu_reset(struct amdgpu_device *adev)
2874 {
2875 struct drm_atomic_state *state = NULL;
2876 int i, r;
2877 int resched;
2878 bool need_full_reset, vram_lost = false;
2879
2880 if (!amdgpu_check_soft_reset(adev)) {
2881 DRM_INFO("No hardware hang detected. 
Did some blocks stall?\n"); 2882 return 0; 2883 } 2884 2885 atomic_inc(&adev->gpu_reset_counter); 2886 2887 /* block TTM */ 2888 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); 2889 /* store modesetting */ 2890 if (amdgpu_device_has_dc_support(adev)) 2891 state = drm_atomic_helper_suspend(adev->ddev); 2892 2893 /* block scheduler */ 2894 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 2895 struct amdgpu_ring *ring = adev->rings[i]; 2896 2897 if (!ring || !ring->sched.thread) 2898 continue; 2899 kthread_park(ring->sched.thread); 2900 amd_sched_hw_job_reset(&ring->sched); 2901 } 2902 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ 2903 amdgpu_fence_driver_force_completion(adev); 2904 2905 need_full_reset = amdgpu_need_full_reset(adev); 2906 2907 if (!need_full_reset) { 2908 amdgpu_pre_soft_reset(adev); 2909 r = amdgpu_soft_reset(adev); 2910 amdgpu_post_soft_reset(adev); 2911 if (r || amdgpu_check_soft_reset(adev)) { 2912 DRM_INFO("soft reset failed, will fallback to full reset!\n"); 2913 need_full_reset = true; 2914 } 2915 } 2916 2917 if (need_full_reset) { 2918 r = amdgpu_suspend(adev); 2919 2920 retry: 2921 amdgpu_atombios_scratch_regs_save(adev); 2922 r = amdgpu_asic_reset(adev); 2923 amdgpu_atombios_scratch_regs_restore(adev); 2924 /* post card */ 2925 amdgpu_atom_asic_init(adev->mode_info.atom_context); 2926 2927 if (!r) { 2928 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); 2929 r = amdgpu_resume_phase1(adev); 2930 if (r) 2931 goto out; 2932 vram_lost = amdgpu_check_vram_lost(adev); 2933 if (vram_lost) { 2934 DRM_ERROR("VRAM is lost!\n"); 2935 atomic_inc(&adev->vram_lost_counter); 2936 } 2937 r = amdgpu_ttm_recover_gart(adev); 2938 if (r) 2939 goto out; 2940 r = amdgpu_resume_phase2(adev); 2941 if (r) 2942 goto out; 2943 if (vram_lost) 2944 amdgpu_fill_reset_magic(adev); 2945 } 2946 } 2947 out: 2948 if (!r) { 2949 amdgpu_irq_gpu_reset_resume_helper(adev); 2950 r = amdgpu_ib_ring_tests(adev); 2951 if (r) { 2952 dev_err(adev->dev, "ib ring test failed (%d).\n", r); 2953 r = amdgpu_suspend(adev); 2954 need_full_reset = true; 2955 goto retry; 2956 } 2957 /** 2958 * recovery vm page tables, since we cannot depend on VRAM is 2959 * consistent after gpu full reset. 
2960 */ 2961 if (need_full_reset && amdgpu_need_backup(adev)) { 2962 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; 2963 struct amdgpu_bo *bo, *tmp; 2964 struct dma_fence *fence = NULL, *next = NULL; 2965 2966 DRM_INFO("recover vram bo from shadow\n"); 2967 mutex_lock(&adev->shadow_list_lock); 2968 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) { 2969 next = NULL; 2970 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next); 2971 if (fence) { 2972 r = dma_fence_wait(fence, false); 2973 if (r) { 2974 WARN(r, "recovery from shadow isn't completed\n"); 2975 break; 2976 } 2977 } 2978 2979 dma_fence_put(fence); 2980 fence = next; 2981 } 2982 mutex_unlock(&adev->shadow_list_lock); 2983 if (fence) { 2984 r = dma_fence_wait(fence, false); 2985 if (r) 2986 WARN(r, "recovery from shadow isn't completed\n"); 2987 } 2988 dma_fence_put(fence); 2989 } 2990 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 2991 struct amdgpu_ring *ring = adev->rings[i]; 2992 2993 if (!ring || !ring->sched.thread) 2994 continue; 2995 2996 amd_sched_job_recovery(&ring->sched); 2997 kthread_unpark(ring->sched.thread); 2998 } 2999 } else { 3000 dev_err(adev->dev, "asic resume failed (%d).\n", r); 3001 amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r); 3002 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 3003 if (adev->rings[i] && adev->rings[i]->sched.thread) { 3004 kthread_unpark(adev->rings[i]->sched.thread); 3005 } 3006 } 3007 } 3008 3009 if (amdgpu_device_has_dc_support(adev)) { 3010 r = drm_atomic_helper_resume(adev->ddev, state); 3011 amdgpu_dm_display_resume(adev); 3012 } else 3013 drm_helper_resume_force_mode(adev->ddev); 3014 3015 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); 3016 if (r) { 3017 /* bad news, how to tell it to userspace ? */ 3018 dev_info(adev->dev, "GPU reset failed\n"); 3019 amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); 3020 } 3021 else { 3022 dev_info(adev->dev, "GPU reset successed!\n"); 3023 } 3024 3025 amdgpu_vf_error_trans_all(adev); 3026 return r; 3027 } 3028 3029 void amdgpu_get_pcie_info(struct amdgpu_device *adev) 3030 { 3031 u32 mask; 3032 int ret; 3033 3034 if (amdgpu_pcie_gen_cap) 3035 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap; 3036 3037 if (amdgpu_pcie_lane_cap) 3038 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap; 3039 3040 /* covers APUs as well */ 3041 if (pci_is_root_bus(adev->pdev->bus)) { 3042 if (adev->pm.pcie_gen_mask == 0) 3043 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; 3044 if (adev->pm.pcie_mlw_mask == 0) 3045 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; 3046 return; 3047 } 3048 3049 if (adev->pm.pcie_gen_mask == 0) { 3050 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); 3051 if (!ret) { 3052 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 3053 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 3054 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); 3055 3056 if (mask & DRM_PCIE_SPEED_25) 3057 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; 3058 if (mask & DRM_PCIE_SPEED_50) 3059 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2; 3060 if (mask & DRM_PCIE_SPEED_80) 3061 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3; 3062 } else { 3063 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; 3064 } 3065 } 3066 if (adev->pm.pcie_mlw_mask == 0) { 3067 ret = drm_pcie_get_max_link_width(adev->ddev, &mask); 3068 if (!ret) { 3069 switch (mask) { 3070 case 32: 3071 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | 3072 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 3073 
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 3074 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 3075 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 3076 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3077 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3078 break; 3079 case 16: 3080 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 3081 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 3082 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 3083 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 3084 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3085 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3086 break; 3087 case 12: 3088 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 3089 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 3090 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 3091 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3092 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3093 break; 3094 case 8: 3095 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 3096 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 3097 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3098 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3099 break; 3100 case 4: 3101 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 3102 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3103 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3104 break; 3105 case 2: 3106 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 3107 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 3108 break; 3109 case 1: 3110 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; 3111 break; 3112 default: 3113 break; 3114 } 3115 } else { 3116 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; 3117 } 3118 } 3119 } 3120 3121 /* 3122 * Debugfs 3123 */ 3124 int amdgpu_debugfs_add_files(struct amdgpu_device *adev, 3125 const struct drm_info_list *files, 3126 unsigned nfiles) 3127 { 3128 unsigned i; 3129 3130 for (i = 0; i < adev->debugfs_count; i++) { 3131 if (adev->debugfs[i].files == files) { 3132 /* Already registered */ 3133 return 0; 3134 } 3135 } 3136 3137 i = adev->debugfs_count + 1; 3138 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) { 3139 DRM_ERROR("Reached maximum number of debugfs components.\n"); 3140 DRM_ERROR("Report so we increase " 3141 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n"); 3142 return -EINVAL; 3143 } 3144 adev->debugfs[adev->debugfs_count].files = files; 3145 adev->debugfs[adev->debugfs_count].num_files = nfiles; 3146 adev->debugfs_count = i; 3147 #if defined(CONFIG_DEBUG_FS) 3148 drm_debugfs_create_files(files, nfiles, 3149 adev->ddev->primary->debugfs_root, 3150 adev->ddev->primary); 3151 #endif 3152 return 0; 3153 } 3154 3155 #if defined(CONFIG_DEBUG_FS) 3156 3157 static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, 3158 size_t size, loff_t *pos) 3159 { 3160 struct amdgpu_device *adev = file_inode(f)->i_private; 3161 ssize_t result = 0; 3162 int r; 3163 bool pm_pg_lock, use_bank; 3164 unsigned instance_bank, sh_bank, se_bank; 3165 3166 if (size & 0x3 || *pos & 0x3) 3167 return -EINVAL; 3168 3169 /* are we reading registers for which a PG lock is necessary? 
*/ 3170 pm_pg_lock = (*pos >> 23) & 1; 3171 3172 if (*pos & (1ULL << 62)) { 3173 se_bank = (*pos >> 24) & 0x3FF; 3174 sh_bank = (*pos >> 34) & 0x3FF; 3175 instance_bank = (*pos >> 44) & 0x3FF; 3176 3177 if (se_bank == 0x3FF) 3178 se_bank = 0xFFFFFFFF; 3179 if (sh_bank == 0x3FF) 3180 sh_bank = 0xFFFFFFFF; 3181 if (instance_bank == 0x3FF) 3182 instance_bank = 0xFFFFFFFF; 3183 use_bank = 1; 3184 } else { 3185 use_bank = 0; 3186 } 3187 3188 *pos &= (1UL << 22) - 1; 3189 3190 if (use_bank) { 3191 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || 3192 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) 3193 return -EINVAL; 3194 mutex_lock(&adev->grbm_idx_mutex); 3195 amdgpu_gfx_select_se_sh(adev, se_bank, 3196 sh_bank, instance_bank); 3197 } 3198 3199 if (pm_pg_lock) 3200 mutex_lock(&adev->pm.mutex); 3201 3202 while (size) { 3203 uint32_t value; 3204 3205 if (*pos > adev->rmmio_size) 3206 goto end; 3207 3208 value = RREG32(*pos >> 2); 3209 r = put_user(value, (uint32_t *)buf); 3210 if (r) { 3211 result = r; 3212 goto end; 3213 } 3214 3215 result += 4; 3216 buf += 4; 3217 *pos += 4; 3218 size -= 4; 3219 } 3220 3221 end: 3222 if (use_bank) { 3223 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 3224 mutex_unlock(&adev->grbm_idx_mutex); 3225 } 3226 3227 if (pm_pg_lock) 3228 mutex_unlock(&adev->pm.mutex); 3229 3230 return result; 3231 } 3232 3233 static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, 3234 size_t size, loff_t *pos) 3235 { 3236 struct amdgpu_device *adev = file_inode(f)->i_private; 3237 ssize_t result = 0; 3238 int r; 3239 bool pm_pg_lock, use_bank; 3240 unsigned instance_bank, sh_bank, se_bank; 3241 3242 if (size & 0x3 || *pos & 0x3) 3243 return -EINVAL; 3244 3245 /* are we reading registers for which a PG lock is necessary? 
*/ 3246 pm_pg_lock = (*pos >> 23) & 1; 3247 3248 if (*pos & (1ULL << 62)) { 3249 se_bank = (*pos >> 24) & 0x3FF; 3250 sh_bank = (*pos >> 34) & 0x3FF; 3251 instance_bank = (*pos >> 44) & 0x3FF; 3252 3253 if (se_bank == 0x3FF) 3254 se_bank = 0xFFFFFFFF; 3255 if (sh_bank == 0x3FF) 3256 sh_bank = 0xFFFFFFFF; 3257 if (instance_bank == 0x3FF) 3258 instance_bank = 0xFFFFFFFF; 3259 use_bank = 1; 3260 } else { 3261 use_bank = 0; 3262 } 3263 3264 *pos &= (1UL << 22) - 1; 3265 3266 if (use_bank) { 3267 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || 3268 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) 3269 return -EINVAL; 3270 mutex_lock(&adev->grbm_idx_mutex); 3271 amdgpu_gfx_select_se_sh(adev, se_bank, 3272 sh_bank, instance_bank); 3273 } 3274 3275 if (pm_pg_lock) 3276 mutex_lock(&adev->pm.mutex); 3277 3278 while (size) { 3279 uint32_t value; 3280 3281 if (*pos > adev->rmmio_size) 3282 return result; 3283 3284 r = get_user(value, (uint32_t *)buf); 3285 if (r) 3286 return r; 3287 3288 WREG32(*pos >> 2, value); 3289 3290 result += 4; 3291 buf += 4; 3292 *pos += 4; 3293 size -= 4; 3294 } 3295 3296 if (use_bank) { 3297 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 3298 mutex_unlock(&adev->grbm_idx_mutex); 3299 } 3300 3301 if (pm_pg_lock) 3302 mutex_unlock(&adev->pm.mutex); 3303 3304 return result; 3305 } 3306 3307 static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, 3308 size_t size, loff_t *pos) 3309 { 3310 struct amdgpu_device *adev = file_inode(f)->i_private; 3311 ssize_t result = 0; 3312 int r; 3313 3314 if (size & 0x3 || *pos & 0x3) 3315 return -EINVAL; 3316 3317 while (size) { 3318 uint32_t value; 3319 3320 value = RREG32_PCIE(*pos >> 2); 3321 r = put_user(value, (uint32_t *)buf); 3322 if (r) 3323 return r; 3324 3325 result += 4; 3326 buf += 4; 3327 *pos += 4; 3328 size -= 4; 3329 } 3330 3331 return result; 3332 } 3333 3334 static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf, 3335 size_t size, loff_t *pos) 3336 { 3337 struct amdgpu_device *adev = file_inode(f)->i_private; 3338 ssize_t result = 0; 3339 int r; 3340 3341 if (size & 0x3 || *pos & 0x3) 3342 return -EINVAL; 3343 3344 while (size) { 3345 uint32_t value; 3346 3347 r = get_user(value, (uint32_t *)buf); 3348 if (r) 3349 return r; 3350 3351 WREG32_PCIE(*pos >> 2, value); 3352 3353 result += 4; 3354 buf += 4; 3355 *pos += 4; 3356 size -= 4; 3357 } 3358 3359 return result; 3360 } 3361 3362 static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, 3363 size_t size, loff_t *pos) 3364 { 3365 struct amdgpu_device *adev = file_inode(f)->i_private; 3366 ssize_t result = 0; 3367 int r; 3368 3369 if (size & 0x3 || *pos & 0x3) 3370 return -EINVAL; 3371 3372 while (size) { 3373 uint32_t value; 3374 3375 value = RREG32_DIDT(*pos >> 2); 3376 r = put_user(value, (uint32_t *)buf); 3377 if (r) 3378 return r; 3379 3380 result += 4; 3381 buf += 4; 3382 *pos += 4; 3383 size -= 4; 3384 } 3385 3386 return result; 3387 } 3388 3389 static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf, 3390 size_t size, loff_t *pos) 3391 { 3392 struct amdgpu_device *adev = file_inode(f)->i_private; 3393 ssize_t result = 0; 3394 int r; 3395 3396 if (size & 0x3 || *pos & 0x3) 3397 return -EINVAL; 3398 3399 while (size) { 3400 uint32_t value; 3401 3402 r = get_user(value, (uint32_t *)buf); 3403 if (r) 3404 return r; 3405 3406 WREG32_DIDT(*pos >> 2, value); 3407 3408 result += 4; 3409 buf += 4; 3410 
*pos += 4; 3411 size -= 4; 3412 } 3413 3414 return result; 3415 } 3416 3417 static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, 3418 size_t size, loff_t *pos) 3419 { 3420 struct amdgpu_device *adev = file_inode(f)->i_private; 3421 ssize_t result = 0; 3422 int r; 3423 3424 if (size & 0x3 || *pos & 0x3) 3425 return -EINVAL; 3426 3427 while (size) { 3428 uint32_t value; 3429 3430 value = RREG32_SMC(*pos); 3431 r = put_user(value, (uint32_t *)buf); 3432 if (r) 3433 return r; 3434 3435 result += 4; 3436 buf += 4; 3437 *pos += 4; 3438 size -= 4; 3439 } 3440 3441 return result; 3442 } 3443 3444 static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf, 3445 size_t size, loff_t *pos) 3446 { 3447 struct amdgpu_device *adev = file_inode(f)->i_private; 3448 ssize_t result = 0; 3449 int r; 3450 3451 if (size & 0x3 || *pos & 0x3) 3452 return -EINVAL; 3453 3454 while (size) { 3455 uint32_t value; 3456 3457 r = get_user(value, (uint32_t *)buf); 3458 if (r) 3459 return r; 3460 3461 WREG32_SMC(*pos, value); 3462 3463 result += 4; 3464 buf += 4; 3465 *pos += 4; 3466 size -= 4; 3467 } 3468 3469 return result; 3470 } 3471 3472 static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, 3473 size_t size, loff_t *pos) 3474 { 3475 struct amdgpu_device *adev = file_inode(f)->i_private; 3476 ssize_t result = 0; 3477 int r; 3478 uint32_t *config, no_regs = 0; 3479 3480 if (size & 0x3 || *pos & 0x3) 3481 return -EINVAL; 3482 3483 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL); 3484 if (!config) 3485 return -ENOMEM; 3486 3487 /* version, increment each time something is added */ 3488 config[no_regs++] = 3; 3489 config[no_regs++] = adev->gfx.config.max_shader_engines; 3490 config[no_regs++] = adev->gfx.config.max_tile_pipes; 3491 config[no_regs++] = adev->gfx.config.max_cu_per_sh; 3492 config[no_regs++] = adev->gfx.config.max_sh_per_se; 3493 config[no_regs++] = adev->gfx.config.max_backends_per_se; 3494 config[no_regs++] = adev->gfx.config.max_texture_channel_caches; 3495 config[no_regs++] = adev->gfx.config.max_gprs; 3496 config[no_regs++] = adev->gfx.config.max_gs_threads; 3497 config[no_regs++] = adev->gfx.config.max_hw_contexts; 3498 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend; 3499 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend; 3500 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size; 3501 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size; 3502 config[no_regs++] = adev->gfx.config.num_tile_pipes; 3503 config[no_regs++] = adev->gfx.config.backend_enable_mask; 3504 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes; 3505 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb; 3506 config[no_regs++] = adev->gfx.config.shader_engine_tile_size; 3507 config[no_regs++] = adev->gfx.config.num_gpus; 3508 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size; 3509 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg; 3510 config[no_regs++] = adev->gfx.config.gb_addr_config; 3511 config[no_regs++] = adev->gfx.config.num_rbs; 3512 3513 /* rev==1 */ 3514 config[no_regs++] = adev->rev_id; 3515 config[no_regs++] = adev->pg_flags; 3516 config[no_regs++] = adev->cg_flags; 3517 3518 /* rev==2 */ 3519 config[no_regs++] = adev->family; 3520 config[no_regs++] = adev->external_rev_id; 3521 3522 /* rev==3 */ 3523 config[no_regs++] = adev->pdev->device; 3524 config[no_regs++] = adev->pdev->revision; 3525 config[no_regs++] = adev->pdev->subsystem_device; 3526 config[no_regs++] = 
adev->pdev->subsystem_vendor; 3527 3528 while (size && (*pos < no_regs * 4)) { 3529 uint32_t value; 3530 3531 value = config[*pos >> 2]; 3532 r = put_user(value, (uint32_t *)buf); 3533 if (r) { 3534 kfree(config); 3535 return r; 3536 } 3537 3538 result += 4; 3539 buf += 4; 3540 *pos += 4; 3541 size -= 4; 3542 } 3543 3544 kfree(config); 3545 return result; 3546 } 3547 3548 static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, 3549 size_t size, loff_t *pos) 3550 { 3551 struct amdgpu_device *adev = file_inode(f)->i_private; 3552 int idx, x, outsize, r, valuesize; 3553 uint32_t values[16]; 3554 3555 if (size & 3 || *pos & 0x3) 3556 return -EINVAL; 3557 3558 if (amdgpu_dpm == 0) 3559 return -EINVAL; 3560 3561 /* convert offset to sensor number */ 3562 idx = *pos >> 2; 3563 3564 valuesize = sizeof(values); 3565 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) 3566 r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); 3567 else 3568 return -EINVAL; 3569 3570 if (size > valuesize) 3571 return -EINVAL; 3572 3573 outsize = 0; 3574 x = 0; 3575 if (!r) { 3576 while (size) { 3577 r = put_user(values[x++], (int32_t *)buf); 3578 buf += 4; 3579 size -= 4; 3580 outsize += 4; 3581 } 3582 } 3583 3584 return !r ? outsize : r; 3585 } 3586 3587 static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, 3588 size_t size, loff_t *pos) 3589 { 3590 struct amdgpu_device *adev = f->f_inode->i_private; 3591 int r, x; 3592 ssize_t result=0; 3593 uint32_t offset, se, sh, cu, wave, simd, data[32]; 3594 3595 if (size & 3 || *pos & 3) 3596 return -EINVAL; 3597 3598 /* decode offset */ 3599 offset = (*pos & 0x7F); 3600 se = ((*pos >> 7) & 0xFF); 3601 sh = ((*pos >> 15) & 0xFF); 3602 cu = ((*pos >> 23) & 0xFF); 3603 wave = ((*pos >> 31) & 0xFF); 3604 simd = ((*pos >> 37) & 0xFF); 3605 3606 /* switch to the specific se/sh/cu */ 3607 mutex_lock(&adev->grbm_idx_mutex); 3608 amdgpu_gfx_select_se_sh(adev, se, sh, cu); 3609 3610 x = 0; 3611 if (adev->gfx.funcs->read_wave_data) 3612 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x); 3613 3614 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); 3615 mutex_unlock(&adev->grbm_idx_mutex); 3616 3617 if (!x) 3618 return -EINVAL; 3619 3620 while (size && (offset < x * 4)) { 3621 uint32_t value; 3622 3623 value = data[offset >> 2]; 3624 r = put_user(value, (uint32_t *)buf); 3625 if (r) 3626 return r; 3627 3628 result += 4; 3629 buf += 4; 3630 offset += 4; 3631 size -= 4; 3632 } 3633 3634 return result; 3635 } 3636 3637 static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, 3638 size_t size, loff_t *pos) 3639 { 3640 struct amdgpu_device *adev = f->f_inode->i_private; 3641 int r; 3642 ssize_t result = 0; 3643 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data; 3644 3645 if (size & 3 || *pos & 3) 3646 return -EINVAL; 3647 3648 /* decode offset */ 3649 offset = (*pos & 0xFFF); /* in dwords */ 3650 se = ((*pos >> 12) & 0xFF); 3651 sh = ((*pos >> 20) & 0xFF); 3652 cu = ((*pos >> 28) & 0xFF); 3653 wave = ((*pos >> 36) & 0xFF); 3654 simd = ((*pos >> 44) & 0xFF); 3655 thread = ((*pos >> 52) & 0xFF); 3656 bank = ((*pos >> 60) & 1); 3657 3658 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL); 3659 if (!data) 3660 return -ENOMEM; 3661 3662 /* switch to the specific se/sh/cu */ 3663 mutex_lock(&adev->grbm_idx_mutex); 3664 amdgpu_gfx_select_se_sh(adev, se, sh, cu); 3665 3666 if (bank == 0) { 3667 if (adev->gfx.funcs->read_wave_vgprs) 3668 adev->gfx.funcs->read_wave_vgprs(adev, 
simd, wave, thread, offset, size>>2, data);
3669 } else {
3670 if (adev->gfx.funcs->read_wave_sgprs)
3671 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3672 }
3673
3674 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3675 mutex_unlock(&adev->grbm_idx_mutex);
3676
3677 while (size) {
3678 uint32_t value;
3679
3680 value = data[offset++];
3681 r = put_user(value, (uint32_t *)buf);
3682 if (r) {
3683 result = r;
3684 goto err;
3685 }
3686
3687 result += 4;
3688 buf += 4;
3689 size -= 4;
3690 }
3691
3692 err:
3693 kfree(data);
3694 return result;
3695 }
3696
3697 static const struct file_operations amdgpu_debugfs_regs_fops = {
3698 .owner = THIS_MODULE,
3699 .read = amdgpu_debugfs_regs_read,
3700 .write = amdgpu_debugfs_regs_write,
3701 .llseek = default_llseek
3702 };
3703 static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3704 .owner = THIS_MODULE,
3705 .read = amdgpu_debugfs_regs_didt_read,
3706 .write = amdgpu_debugfs_regs_didt_write,
3707 .llseek = default_llseek
3708 };
3709 static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3710 .owner = THIS_MODULE,
3711 .read = amdgpu_debugfs_regs_pcie_read,
3712 .write = amdgpu_debugfs_regs_pcie_write,
3713 .llseek = default_llseek
3714 };
3715 static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3716 .owner = THIS_MODULE,
3717 .read = amdgpu_debugfs_regs_smc_read,
3718 .write = amdgpu_debugfs_regs_smc_write,
3719 .llseek = default_llseek
3720 };
3721
3722 static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3723 .owner = THIS_MODULE,
3724 .read = amdgpu_debugfs_gca_config_read,
3725 .llseek = default_llseek
3726 };
3727
3728 static const struct file_operations amdgpu_debugfs_sensors_fops = {
3729 .owner = THIS_MODULE,
3730 .read = amdgpu_debugfs_sensor_read,
3731 .llseek = default_llseek
3732 };
3733
3734 static const struct file_operations amdgpu_debugfs_wave_fops = {
3735 .owner = THIS_MODULE,
3736 .read = amdgpu_debugfs_wave_read,
3737 .llseek = default_llseek
3738 };
3739 static const struct file_operations amdgpu_debugfs_gpr_fops = {
3740 .owner = THIS_MODULE,
3741 .read = amdgpu_debugfs_gpr_read,
3742 .llseek = default_llseek
3743 };
3744
3745 static const struct file_operations *debugfs_regs[] = {
3746 &amdgpu_debugfs_regs_fops,
3747 &amdgpu_debugfs_regs_didt_fops,
3748 &amdgpu_debugfs_regs_pcie_fops,
3749 &amdgpu_debugfs_regs_smc_fops,
3750 &amdgpu_debugfs_gca_config_fops,
3751 &amdgpu_debugfs_sensors_fops,
3752 &amdgpu_debugfs_wave_fops,
3753 &amdgpu_debugfs_gpr_fops,
3754 };
3755
3756 static const char *debugfs_regs_names[] = {
3757 "amdgpu_regs",
3758 "amdgpu_regs_didt",
3759 "amdgpu_regs_pcie",
3760 "amdgpu_regs_smc",
3761 "amdgpu_gca_config",
3762 "amdgpu_sensors",
3763 "amdgpu_wave",
3764 "amdgpu_gpr",
3765 };
3766
3767 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3768 {
3769 struct drm_minor *minor = adev->ddev->primary;
3770 struct dentry *ent, *root = minor->debugfs_root;
3771 unsigned i, j;
3772
3773 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3774 ent = debugfs_create_file(debugfs_regs_names[i],
3775 S_IFREG | S_IRUGO, root,
3776 adev, debugfs_regs[i]);
3777 if (IS_ERR(ent)) {
3778 for (j = 0; j < i; j++) {
3779 debugfs_remove(adev->debugfs_regs[j]);
3780 adev->debugfs_regs[j] = NULL;
3781 }
3782 return PTR_ERR(ent);
3783 }
3784
3785 if (!i)
3786 i_size_write(ent->d_inode, adev->rmmio_size);
3787 adev->debugfs_regs[i] = ent;
3788 }
3789
3790 return 0;
3791 }
3792
3793 static void
amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) 3794 { 3795 unsigned i; 3796 3797 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { 3798 if (adev->debugfs_regs[i]) { 3799 debugfs_remove(adev->debugfs_regs[i]); 3800 adev->debugfs_regs[i] = NULL; 3801 } 3802 } 3803 } 3804 3805 static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) 3806 { 3807 struct drm_info_node *node = (struct drm_info_node *) m->private; 3808 struct drm_device *dev = node->minor->dev; 3809 struct amdgpu_device *adev = dev->dev_private; 3810 int r = 0, i; 3811 3812 /* hold on the scheduler */ 3813 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 3814 struct amdgpu_ring *ring = adev->rings[i]; 3815 3816 if (!ring || !ring->sched.thread) 3817 continue; 3818 kthread_park(ring->sched.thread); 3819 } 3820 3821 seq_printf(m, "run ib test:\n"); 3822 r = amdgpu_ib_ring_tests(adev); 3823 if (r) 3824 seq_printf(m, "ib ring tests failed (%d).\n", r); 3825 else 3826 seq_printf(m, "ib ring tests passed.\n"); 3827 3828 /* go on the scheduler */ 3829 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 3830 struct amdgpu_ring *ring = adev->rings[i]; 3831 3832 if (!ring || !ring->sched.thread) 3833 continue; 3834 kthread_unpark(ring->sched.thread); 3835 } 3836 3837 return 0; 3838 } 3839 3840 static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = { 3841 {"amdgpu_test_ib", &amdgpu_debugfs_test_ib} 3842 }; 3843 3844 static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev) 3845 { 3846 return amdgpu_debugfs_add_files(adev, 3847 amdgpu_debugfs_test_ib_ring_list, 1); 3848 } 3849 3850 int amdgpu_debugfs_init(struct drm_minor *minor) 3851 { 3852 return 0; 3853 } 3854 3855 static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data) 3856 { 3857 struct drm_info_node *node = (struct drm_info_node *) m->private; 3858 struct drm_device *dev = node->minor->dev; 3859 struct amdgpu_device *adev = dev->dev_private; 3860 3861 seq_write(m, adev->bios, adev->bios_size); 3862 return 0; 3863 } 3864 3865 static const struct drm_info_list amdgpu_vbios_dump_list[] = { 3866 {"amdgpu_vbios", 3867 amdgpu_debugfs_get_vbios_dump, 3868 0, NULL}, 3869 }; 3870 3871 static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev) 3872 { 3873 return amdgpu_debugfs_add_files(adev, 3874 amdgpu_vbios_dump_list, 1); 3875 } 3876 #else 3877 static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev) 3878 { 3879 return 0; 3880 } 3881 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) 3882 { 3883 return 0; 3884 } 3885 static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev) 3886 { 3887 return 0; 3888 } 3889 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { } 3890 #endif 3891