/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>

#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_dm_debugfs.h"
#include "amdgpu_ras.h"

/**
 * amdgpu_debugfs_add_files - Add simple debugfs entries
 *
 * @adev: Device to attach debugfs entries to
 * @files: Array of function callbacks that respond to reads
 * @nfiles: Number of callbacks to register
 *
 */
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev->ddev->primary->debugfs_root,
				 adev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

/**
 * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
 *
 * @read: True if reading
 * @f: open file handle
 * @buf: User buffer to write/read to
 * @size: Number of bytes to write/read
 * @pos: Offset to seek to
 *
 * This debugfs entry has special meaning on the offset being sought.
 * Various bits have different meanings:
 *
 * Bit 62: Indicates a GRBM bank switch is needed
 * Bit 61: Indicates a SRBM bank switch is needed (implies bit 62 is
 *	   zero)
 * Bits 24..33: The SE or ME selector if needed
 * Bits 34..43: The SH (or SA) or PIPE selector if needed
 * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
 *
 * Bit 23: Indicates that the PM power gating lock should be held.
 *	   This is necessary to read registers that might be
 *	   unreliable during a power gating transition.
 *
 * The lower bits are the BYTE offset of the register to read. This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
 */
static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
		char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank, use_ring;
	unsigned instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;

	pm_pg_lock = use_bank = use_ring = false;
	instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;

	if (size & 0x3 || *pos & 0x3 ||
	    ((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;

		/* 0x3FF in a selector field means "broadcast to all" */
		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = true;
	} else if (*pos & (1ULL << 61)) {
		me = (*pos & GENMASK_ULL(33, 24)) >> 24;
		pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
		queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
		vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;

		use_ring = true;
	} else {
		use_bank = use_ring = false;
	}

	*pos &= (1UL << 22) - 1;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		/* get_sync takes a reference even on failure, so drop it */
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return -EINVAL;
		}
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	} else if (use_ring) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (read) {
			value = RREG32(*pos >> 2);
			r = put_user(value, (uint32_t *)buf);
		} else {
			r = get_user(value, (uint32_t *)buf);
			if (!r)
				amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
		}
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	} else if (use_ring) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
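
/*
 * Example (userspace sketch, not part of the kernel build): reading one
 * dword through amdgpu_regs with a GRBM bank switch to SE 1, SH 0 and
 * all instances (0x3FF in a selector field means broadcast). REG is a
 * hypothetical register BYTE offset:
 *
 *	uint64_t off = (1ULL << 62) |		// GRBM bank switch
 *		       (1ULL << 24) |		// SE = 1
 *		       (0ULL << 34) |		// SH = 0
 *		       (0x3FFULL << 44) |	// instance = broadcast
 *		       REG;
 *	uint32_t val;
 *	pread(fd, &val, 4, off);	// fd: <debugfs>/dri/<n>/amdgpu_regs
 */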

/**
 * amdgpu_debugfs_regs_read - Callback for reading MMIO registers
 */
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
}

/**
 * amdgpu_debugfs_regs_write - Callback for writing MMIO registers
 */
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}

/**
 * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to read. This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos: Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to write. This
 * allows writing multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_regs_didt_read - Read from a DIDT register
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to read. This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_regs_didt_write - Write to a DIDT register
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos: Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to write. This
 * allows writing multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_regs_smc_read - Read from a SMC register
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to read. This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
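 *
 * Note that unlike the MMIO file, the SMC path passes the seek position
 * through as the register address (RREG32_SMC(*pos)) rather than
 * dividing it by four. A userspace sketch (illustrative only; ADDR is a
 * hypothetical SMC register address):
 *
 *	uint32_t val;
 *	pread(fd, &val, 4, ADDR);	// fd: .../amdgpu_regs_smc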
 */
static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_regs_smc_write - Write to a SMC register
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos: Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to write. This
 * allows writing multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_gca_config_read - Read from gfx config data
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * This file is used to access configuration data in a somewhat
 * stable fashion. The format is a series of DWORDs with the first
 * indicating which revision it is. New content is appended to the
 * end so that older software can still read the data.
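 *
 * A userspace sketch (illustrative only): read the stream and let the
 * leading version dword gate how many fields are interpreted:
 *
 *	uint32_t cfg[64];
 *	ssize_t n = pread(fd, cfg, sizeof(cfg), 0);
 *	// cfg[0] is the format revision (3 as of this writing);
 *	// cfg[1] is max_shader_engines, cfg[2] is max_tile_pipes, ...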
 */
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}

/**
 * amdgpu_debugfs_sensor_read - Read from the powerplay sensors
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The offset is treated as the BYTE address of one of the sensors
 * enumerated in amd/include/kgd_pp_interface.h under the
 * 'amd_pp_sensors' enumeration. For instance, to read the UVD VCLK
 * you would use the offset 3 * 4 = 12.
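 *
 * A userspace sketch (illustrative only; the sensor index 3 for the
 * UVD VCLK comes from the amd_pp_sensors enumeration as above):
 *
 *	uint32_t vclk;
 *	pread(fd, &vclk, 4, 3 * 4);	// fd: .../amdgpu_sensors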
 */
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (!adev->pm.dpm_enabled)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r) {
		amdgpu_virt_disable_access_debugfs(adev);
		return r;
	}

	if (size > valuesize) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return !r ? outsize : r;
}

/**
 * amdgpu_debugfs_wave_read - Read WAVE STATUS data
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The offset being sought selects the wave for which status data
 * will be returned. The bits are used as follows:
 *
 * Bits 0..6: Byte offset into data
 * Bits 7..14: SE selector
 * Bits 15..22: SH/SA selector
 * Bits 23..30: CU/{WGP+SIMD} selector
 * Bits 31..36: WAVE ID selector
 * Bits 37..44: SIMD ID selector
 *
 * The returned data begins with one DWORD of version information,
 * followed by the WAVE STATUS registers relevant to the GFX IP version
 * being used. See gfx_v8_0_read_wave_data() for an example output.
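 *
 * A userspace sketch (illustrative only), selecting wave 0 on SIMD 0
 * of CU 2 at SE 0/SH 0 and reading the packed status words:
 *
 *	uint64_t off = (2ULL << 23);	// CU 2; SE/SH/wave/SIMD all zero
 *	uint32_t data[32];
 *	pread(fd, data, sizeof(data), off);	// data[0] is the version dword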
 */
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(6, 0));
	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (!x) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_gpr_read - Read wave gprs
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The offset being sought selects the wave for which GPR data
 * will be returned. The bits are used as follows:
 *
 * Bits 0..11: Byte offset into data
 * Bits 12..19: SE selector
 * Bits 20..27: SH/SA selector
 * Bits 28..35: CU/{WGP+SIMD} selector
 * Bits 36..43: WAVE ID selector
 * Bits 44..51: SIMD ID selector
 * Bits 52..59: Thread selector
 * Bits 60..61: Bank selector (VGPR=0,SGPR=1)
 *
 * The return data comes from the SGPR or VGPR register bank for
 * the selected operational unit.
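 *
 * A userspace sketch (illustrative only), reading the first four SGPRs
 * (bank 1) of wave 0 on SIMD 0 of CU 2; the thread selector only
 * matters for VGPR reads:
 *
 *	uint64_t off = (1ULL << 60) |	// bank = SGPR
 *		       (2ULL << 28);	// CU 2; other selectors zero
 *	uint32_t sgpr[4];
 *	pread(fd, sgpr, sizeof(sgpr), off);	// fd: .../amdgpu_gpr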
 */
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
				       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size > 4096 || size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
	wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
	simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;

	data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		kfree(data);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		kfree(data);
		return r;
	}

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread,
							 offset, size >> 2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave,
							 offset, size >> 2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	while (size) {
		uint32_t value;

		value = data[result >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

err:
	kfree(data);
	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos: Offset to seek to
 *
 * Write a 32-bit zero to disable or a 32-bit non-zero to enable.
 */
static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
					   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			return r;
		}

		amdgpu_gfx_off_ctrl(adev, value ? true : false);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	return result;
}

static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
	.owner = THIS_MODULE,
	.write = amdgpu_debugfs_gfxoff_write,
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
	&amdgpu_debugfs_gfxoff_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
	"amdgpu_gfxoff",
};

/**
 * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide
 *			      register access.
 *
 * @adev: The device to attach the debugfs entries to
 */
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (!i && !IS_ERR_OR_NULL(ent))
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}

static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r = 0, i;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	/* Avoid accidentally unparking the sched thread during GPU reset */
	mutex_lock(&adev->lock_reset);

	/* hold on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
	}

	seq_puts(m, "run ib test:\n");
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		seq_printf(m, "ib ring tests failed (%d).\n", r);
	else
		seq_puts(m, "ib ring tests passed.\n");

	/* go on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_unpark(ring->sched.thread);
	}

	mutex_unlock(&adev->lock_reset);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_write(m, adev->bios, adev->bios_size);
	return 0;
}

static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_list[] = {
	{"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
	{"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram},
	{"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt},
};
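
/*
 * These entries are registered in one call from amdgpu_debugfs_init()
 * below; a sketch of the call made there:
 *
 *	amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
 *				 ARRAY_SIZE(amdgpu_debugfs_list));
 */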

static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
					  struct dma_fence **fences)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t sync_seq, last_seq;

	last_seq = atomic_read(&ring->fence_drv.last_seq);
	sync_seq = ring->fence_drv.sync_seq;

	last_seq &= drv->num_fences_mask;
	sync_seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		fences[last_seq] = fence;

	} while (last_seq != sync_seq);
}

static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
					    int length)
{
	int i;
	struct dma_fence *fence;

	for (i = 0; i < length; i++) {
		fence = fences[i];
		if (!fence)
			continue;
		dma_fence_signal(fence);
		dma_fence_put(fence);
	}
}

static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct dma_fence *fence;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
		fence = sched->ops->run_job(s_job);
		dma_fence_put(fence);
	}
	spin_unlock(&sched->job_list_lock);
}

static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
{
	struct amdgpu_job *job;
	struct drm_sched_job *s_job;
	uint32_t preempt_seq;
	struct dma_fence *fence, **ptr;
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct drm_gpu_scheduler *sched = &ring->sched;

	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
		return;

	preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
	if (preempt_seq <= atomic_read(&drv->last_seq))
		return;

	preempt_seq &= drv->num_fences_mask;
	ptr = &drv->fences[preempt_seq];
	fence = rcu_dereference_protected(*ptr, 1);

	spin_lock(&sched->job_list_lock);
	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
		job = to_amdgpu_job(s_job);
		if (job->fence == fence)
			/* mark the job as preempted */
			job->preemption_status |= AMDGPU_IB_PREEMPTED;
	}
	spin_unlock(&sched->job_list_lock);
}

static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
{
	int r, resched, length;
	struct amdgpu_ring *ring;
	struct dma_fence **fences = NULL;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;

	if (val >= AMDGPU_MAX_RINGS)
		return -EINVAL;

	ring = adev->rings[val];

	if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
		return -EINVAL;

	/* the last preemption failed */
	if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
		return -EBUSY;

	length = ring->fence_drv.num_fences_mask + 1;
	fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Avoid accidentally unparking the sched thread during GPU reset */
	mutex_lock(&adev->lock_reset);

	/* stop the scheduler */
	kthread_park(ring->sched.thread);

	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* preempt the IB */
	r = amdgpu_ring_preempt_ib(ring);
	if (r) {
		DRM_WARN("failed to preempt ring %d\n", ring->idx);
		goto failure;
	}

	amdgpu_fence_process(ring);

	if (atomic_read(&ring->fence_drv.last_seq) !=
	    ring->fence_drv.sync_seq) {
		DRM_INFO("ring %d was preempted\n", ring->idx);

		amdgpu_ib_preempt_mark_partial_job(ring);

		/* swap out the old fences */
		amdgpu_ib_preempt_fences_swap(ring, fences);

		amdgpu_fence_driver_force_completion(ring);

		/* resubmit unfinished jobs */
		amdgpu_ib_preempt_job_recovery(&ring->sched);

		/* wait for jobs finished */
		amdgpu_fence_wait_empty(ring);

		/* signal the old fences */
		amdgpu_ib_preempt_signal_fences(fences, length);
	}

failure:
	/* restart the scheduler */
	kthread_unpark(ring->sched.thread);

	mutex_unlock(&adev->lock_reset);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);

	kfree(fences);

	return 0;
}

static int amdgpu_debugfs_sclk_set(void *data, u64 val)
{
	int ret = 0;
	uint32_t max_freq, min_freq;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(adev->ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK,
					     &min_freq, &max_freq, true);
		if (ret || val > max_freq || val < min_freq) {
			ret = -EINVAL;
			goto out;
		}
		ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK,
					      (uint32_t)val, (uint32_t)val, true);
	}

out:
	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (ret)
		return -EINVAL;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
			amdgpu_debugfs_ib_preempt, "%llu\n");

DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
			amdgpu_debugfs_sclk_set, "%llu\n");

int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	int r, i;

	adev->debugfs_preempt =
		debugfs_create_file("amdgpu_preempt_ib", 0600,
				    adev->ddev->primary->debugfs_root, adev,
				    &fops_ib_preempt);
	if (!(adev->debugfs_preempt)) {
		DRM_ERROR("unable to create amdgpu_preempt_ib debugfs file\n");
		return -EIO;
	}

	adev->smu.debugfs_sclk =
		debugfs_create_file("amdgpu_force_sclk", 0200,
				    adev->ddev->primary->debugfs_root, adev,
				    &fops_sclk_set);
	if (!(adev->smu.debugfs_sclk)) {
		DRM_ERROR("unable to create amdgpu_force_sclk debugfs file\n");
		return -EIO;
	}

	/* Register debugfs entries for amdgpu_ttm */
	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}

	r = amdgpu_debugfs_pm_init(adev);
	if (r) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return r;
	}

	if (amdgpu_debugfs_sa_init(adev))
		dev_err(adev->dev, "failed to register debugfs file for SA\n");

	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	r = amdgpu_debugfs_gem_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
DRM_ERROR("registering firmware debugfs failed (%d).\n", r); 1416 1417 #if defined(CONFIG_DRM_AMD_DC) 1418 if (amdgpu_device_has_dc_support(adev)) { 1419 if (dtn_debugfs_init(adev)) 1420 DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n"); 1421 } 1422 #endif 1423 1424 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1425 struct amdgpu_ring *ring = adev->rings[i]; 1426 1427 if (!ring) 1428 continue; 1429 1430 if (amdgpu_debugfs_ring_init(adev, ring)) { 1431 DRM_ERROR("Failed to register debugfs file for rings !\n"); 1432 } 1433 } 1434 1435 amdgpu_ras_debugfs_create_all(adev); 1436 1437 return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list, 1438 ARRAY_SIZE(amdgpu_debugfs_list)); 1439 } 1440 1441 #else 1442 int amdgpu_debugfs_init(struct amdgpu_device *adev) 1443 { 1444 return 0; 1445 } 1446 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) 1447 { 1448 return 0; 1449 } 1450 #endif 1451