/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "kfd_debug.h"
#include "kfd_device_queue_manager.h"
#include "kfd_topology.h"
#include <linux/file.h>
#include <uapi/linux/kfd_ioctl.h>

#define MAX_WATCH_ADDRESSES	4

int kfd_dbg_ev_query_debug_event(struct kfd_process *process,
				 unsigned int *queue_id,
				 unsigned int *gpu_id,
				 uint64_t exception_clear_mask,
				 uint64_t *event_status)
{
	struct process_queue_manager *pqm;
	struct process_queue_node *pqn;
	int i;

	if (!(process && process->debug_trap_enabled))
		return -ENODATA;

	mutex_lock(&process->event_mutex);
	*event_status = 0;
	*queue_id = 0;
	*gpu_id = 0;

	/* find and report queue events */
	pqm = &process->pqm;
	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		uint64_t tmp = process->exception_enable_mask;

		if (!pqn->q)
			continue;

		tmp &= pqn->q->properties.exception_status;

		if (!tmp)
			continue;

		*event_status = pqn->q->properties.exception_status;
		*queue_id = pqn->q->properties.queue_id;
		*gpu_id = pqn->q->device->id;
		pqn->q->properties.exception_status &= ~exception_clear_mask;
		goto out;
	}

	/* find and report device events */
	for (i = 0; i < process->n_pdds; i++) {
		struct kfd_process_device *pdd = process->pdds[i];
		uint64_t tmp = process->exception_enable_mask
						& pdd->exception_status;

		if (!tmp)
			continue;

		*event_status = pdd->exception_status;
		*gpu_id = pdd->dev->id;
		pdd->exception_status &= ~exception_clear_mask;
		goto out;
	}

	/* report process events */
	if (process->exception_enable_mask & process->exception_status) {
		*event_status = process->exception_status;
		process->exception_status &= ~exception_clear_mask;
	}

out:
	mutex_unlock(&process->event_mutex);
	return *event_status ? 0 : -EAGAIN;
}

void debug_event_write_work_handler(struct work_struct *work)
{
	struct kfd_process *process;

	static const char write_data = '.';
	loff_t pos = 0;

	process = container_of(work,
			struct kfd_process,
			debug_event_workarea);

	kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
}

/* update process/device/queue exception status, write to the debug event file
 * descriptor only if the exception is enabled by the debugger.
 */
bool kfd_dbg_ev_raise(uint64_t event_mask,
		      struct kfd_process *process, struct kfd_node *dev,
		      unsigned int source_id, bool use_worker,
		      void *exception_data, size_t exception_data_size)
{
	struct process_queue_manager *pqm;
	struct process_queue_node *pqn;
	int i;
	static const char write_data = '.';
	loff_t pos = 0;
	bool is_subscribed = true;

	if (!(process && process->debug_trap_enabled))
		return false;

	mutex_lock(&process->event_mutex);

	if (event_mask & KFD_EC_MASK_DEVICE) {
		for (i = 0; i < process->n_pdds; i++) {
			struct kfd_process_device *pdd = process->pdds[i];

			if (pdd->dev != dev)
				continue;

			pdd->exception_status |= event_mask & KFD_EC_MASK_DEVICE;

			if (event_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
				if (!pdd->vm_fault_exc_data) {
					pdd->vm_fault_exc_data = kmemdup(
							exception_data,
							exception_data_size,
							GFP_KERNEL);
					if (!pdd->vm_fault_exc_data)
						pr_debug("Failed to allocate exception data memory\n");
				} else {
					pr_debug("Debugger exception data not saved\n");
					print_hex_dump_bytes("exception data: ",
							DUMP_PREFIX_OFFSET,
							exception_data,
							exception_data_size);
				}
			}
			break;
		}
	} else if (event_mask & KFD_EC_MASK_PROCESS) {
		process->exception_status |= event_mask & KFD_EC_MASK_PROCESS;
	} else {
		pqm = &process->pqm;
		list_for_each_entry(pqn, &pqm->queues,
				process_queue_list) {
			int target_id;

			if (!pqn->q)
				continue;

			target_id = event_mask & KFD_EC_MASK(EC_QUEUE_NEW) ?
					pqn->q->properties.queue_id :
					pqn->q->doorbell_id;

			if (pqn->q->device != dev || target_id != source_id)
				continue;

			pqn->q->properties.exception_status |= event_mask;
			break;
		}
	}

	if (process->exception_enable_mask & event_mask) {
		if (use_worker)
			schedule_work(&process->debug_event_workarea);
		else
			kernel_write(process->dbg_ev_file,
					&write_data,
					1,
					&pos);
	} else {
		is_subscribed = false;
	}

	mutex_unlock(&process->event_mutex);

	return is_subscribed;
}

/* set pending event queue entry from ring entry */
bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev,
				   unsigned int pasid,
				   uint32_t doorbell_id,
				   uint64_t trap_mask,
				   void *exception_data,
				   size_t exception_data_size)
{
	struct kfd_process *p;
	bool signaled_to_debugger_or_runtime = false;

	p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return false;

	if (!kfd_dbg_ev_raise(trap_mask, p, dev, doorbell_id, true,
			      exception_data, exception_data_size)) {
		struct process_queue_manager *pqm;
		struct process_queue_node *pqn;

		if (!!(trap_mask & KFD_EC_MASK_QUEUE) &&
		    p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) {
			mutex_lock(&p->mutex);

			pqm = &p->pqm;
			list_for_each_entry(pqn, &pqm->queues,
					    process_queue_list) {

				if (!(pqn->q && pqn->q->device == dev &&
				      pqn->q->doorbell_id == doorbell_id))
					continue;

				kfd_send_exception_to_runtime(p, pqn->q->properties.queue_id,
							      trap_mask);

				signaled_to_debugger_or_runtime = true;

				break;
			}

			mutex_unlock(&p->mutex);
		} else if (trap_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
			kfd_dqm_evict_pasid(dev->dqm, p->pasid);
			kfd_signal_vm_fault_event(dev, p->pasid, NULL,
						  exception_data);

			signaled_to_debugger_or_runtime = true;
		}
	} else {
		signaled_to_debugger_or_runtime = true;
	}

	kfd_unref_process(p);

	return signaled_to_debugger_or_runtime;
}

int kfd_dbg_send_exception_to_runtime(struct kfd_process *p,
				      unsigned int dev_id,
				      unsigned int queue_id,
				      uint64_t error_reason)
{
	if (error_reason & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
		struct kfd_process_device *pdd = NULL;
		struct kfd_hsa_memory_exception_data *data;
		int i;

		for (i = 0; i < p->n_pdds; i++) {
			if (p->pdds[i]->dev->id == dev_id) {
				pdd = p->pdds[i];
				break;
			}
		}

		if (!pdd)
			return -ENODEV;

		data = (struct kfd_hsa_memory_exception_data *)
				pdd->vm_fault_exc_data;

		kfd_dqm_evict_pasid(pdd->dev->dqm, p->pasid);
		kfd_signal_vm_fault_event(pdd->dev, p->pasid, NULL, data);
		error_reason &= ~KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION);
	}

	if (error_reason & (KFD_EC_MASK(EC_PROCESS_RUNTIME))) {
		/*
		 * block should only happen after the debugger receives runtime
		 * enable notice.
		 */
		up(&p->runtime_enable_sema);
		error_reason &= ~KFD_EC_MASK(EC_PROCESS_RUNTIME);
	}

	if (error_reason)
		return kfd_send_exception_to_runtime(p, queue_id, error_reason);

	return 0;
}

/* Enable or disable the debug trap workaround on a single queue (GC 11.x only). */
static int kfd_dbg_set_queue_workaround(struct queue *q, bool enable)
{
	struct mqd_update_info minfo = {0};
	int err;

	if (!q)
		return 0;

	if (KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) ||
	    KFD_GC_VERSION(q->device) >= IP_VERSION(12, 0, 0))
		return 0;

	if (enable && q->properties.is_user_cu_masked)
		return -EBUSY;

	minfo.update_flag = enable ? UPDATE_FLAG_DBG_WA_ENABLE : UPDATE_FLAG_DBG_WA_DISABLE;

	q->properties.is_dbg_wa = enable;
	err = q->device->dqm->ops.update_queue(q->device->dqm, q, &minfo);
	if (err)
		q->properties.is_dbg_wa = false;

	return err;
}

/* Apply the queue debug workaround to all queues of the target process,
 * unwinding every queue if enabling fails part way through.
 */
static int kfd_dbg_set_workaround(struct kfd_process *target, bool enable)
{
	struct process_queue_manager *pqm = &target->pqm;
	struct process_queue_node *pqn;
	int r = 0;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		r = kfd_dbg_set_queue_workaround(pqn->q, enable);
		if (enable && r)
			goto unwind;
	}

	return 0;

unwind:
	list_for_each_entry(pqn, &pqm->queues, process_queue_list)
		kfd_dbg_set_queue_workaround(pqn->q, false);

	if (enable)
		target->runtime_info.runtime_state = r == -EBUSY ?
				DEBUG_RUNTIME_STATE_ENABLED_BUSY :
				DEBUG_RUNTIME_STATE_ENABLED_ERROR;

	return r;
}

/* Push the current per-VMID debug configuration to the MES firmware scheduler. */
int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd)
{
	uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;
	uint32_t flags = pdd->process->dbg_flags;
	bool sq_trap_en = !!spi_dbg_cntl;

	if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
		return 0;

	return amdgpu_mes_set_shader_debugger(pdd->dev->adev, pdd->proc_ctx_gpu_addr, spi_dbg_cntl,
						pdd->watch_points, flags, sq_trap_en);
}

#define KFD_DEBUGGER_INVALID_WATCH_POINT_ID -1

/* Allocate a free device watch point ID and mark it as owned by this process. */
static int kfd_dbg_get_dev_watch_id(struct kfd_process_device *pdd, int *watch_id)
{
	int i;

	*watch_id = KFD_DEBUGGER_INVALID_WATCH_POINT_ID;

	spin_lock(&pdd->dev->kfd->watch_points_lock);

	for (i = 0; i < MAX_WATCH_ADDRESSES; i++) {
		/* device watchpoint in use so skip */
		if ((pdd->dev->kfd->alloc_watch_ids >> i) & 0x1)
			continue;

		pdd->alloc_watch_ids |= 0x1 << i;
		pdd->dev->kfd->alloc_watch_ids |= 0x1 << i;
		*watch_id = i;
		spin_unlock(&pdd->dev->kfd->watch_points_lock);
		return 0;
	}

	spin_unlock(&pdd->dev->kfd->watch_points_lock);

	return -ENOMEM;
}

static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
{
	spin_lock(&pdd->dev->kfd->watch_points_lock);

	/* process owns device watch point so safe to clear */
	if ((pdd->alloc_watch_ids >> watch_id) & 0x1) {
		pdd->alloc_watch_ids &= ~(0x1 << watch_id);
		pdd->dev->kfd->alloc_watch_ids &= ~(0x1 << watch_id);
	}

	spin_unlock(&pdd->dev->kfd->watch_points_lock);
}

static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
{
	bool owns_watch_id = false;

	spin_lock(&pdd->dev->kfd->watch_points_lock);
	owns_watch_id = watch_id < MAX_WATCH_ADDRESSES &&
			((pdd->alloc_watch_ids >> watch_id) & 0x1);

	spin_unlock(&pdd->dev->kfd->watch_points_lock);

	return owns_watch_id;
}

int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd,
					 uint32_t watch_id)
{
	int r;

	if (!kfd_dbg_owns_dev_watch_id(pdd, watch_id))
		return -EINVAL;

	if (!pdd->dev->kfd->shared_resources.enable_mes) {
		r = debug_lock_and_unmap(pdd->dev->dqm);
		if (r)
			return r;
	}

	amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
	pdd->watch_points[watch_id] = pdd->dev->kfd2kgd->clear_address_watch(
							pdd->dev->adev,
							watch_id);
	amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

	if (!pdd->dev->kfd->shared_resources.enable_mes)
		r = debug_map_and_unlock(pdd->dev->dqm);
	else
		r = kfd_dbg_set_mes_debug_mode(pdd);

	kfd_dbg_clear_dev_watch_id(pdd, watch_id);

	return r;
}

int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd,
				       uint64_t watch_address,
				       uint32_t watch_address_mask,
				       uint32_t *watch_id,
				       uint32_t watch_mode)
{
	int xcc_id, r = kfd_dbg_get_dev_watch_id(pdd, watch_id);
	uint32_t xcc_mask = pdd->dev->xcc_mask;

	if (r)
		return r;

	if (!pdd->dev->kfd->shared_resources.enable_mes) {
		r = debug_lock_and_unmap(pdd->dev->dqm);
		if (r) {
			kfd_dbg_clear_dev_watch_id(pdd, *watch_id);
			return r;
		}
	}

	amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
	for_each_inst(xcc_id, xcc_mask)
		pdd->watch_points[*watch_id] = pdd->dev->kfd2kgd->set_address_watch(
				pdd->dev->adev,
				watch_address,
				watch_address_mask,
				*watch_id,
				watch_mode,
				pdd->dev->vm_info.last_vmid_kfd,
				xcc_id);
	amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

	if (!pdd->dev->kfd->shared_resources.enable_mes)
		r = debug_map_and_unlock(pdd->dev->dqm);
	else
		r = kfd_dbg_set_mes_debug_mode(pdd);

	/* HWS is broken so no point in HW rollback but release the watchpoint anyways */
	if (r)
		kfd_dbg_clear_dev_watch_id(pdd, *watch_id);

	return 0;
}

static void kfd_dbg_clear_process_address_watch(struct kfd_process *target)
{
	int i, j;

	for (i = 0; i < target->n_pdds; i++)
		for (j = 0; j < MAX_WATCH_ADDRESSES; j++)
			kfd_dbg_trap_clear_dev_address_watch(target->pdds[i], j);
}

int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags)
{
	uint32_t prev_flags = target->dbg_flags;
	int i, r = 0, rewind_count = 0;

	for (i = 0; i < target->n_pdds; i++) {
		if (!kfd_dbg_is_per_vmid_supported(target->pdds[i]->dev) &&
			(*flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP)) {
			*flags = prev_flags;
			return -EACCES;
		}
	}

	target->dbg_flags = *flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP;
	*flags = prev_flags;
	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
			continue;

		if (!pdd->dev->kfd->shared_resources.enable_mes)
			r = debug_refresh_runlist(pdd->dev->dqm);
		else
			r = kfd_dbg_set_mes_debug_mode(pdd);

		if (r) {
			target->dbg_flags = prev_flags;
			break;
		}

		rewind_count++;
	}

	/* Rewind flags */
	if (r) {
		target->dbg_flags = prev_flags;

		for (i = 0; i < rewind_count; i++) {
			struct kfd_process_device *pdd = target->pdds[i];

			if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
				continue;

			if (!pdd->dev->kfd->shared_resources.enable_mes)
				debug_refresh_runlist(pdd->dev->dqm);
			else
				kfd_dbg_set_mes_debug_mode(pdd);
		}
	}

	return r;
}

/* kfd_dbg_trap_deactivate:
 *	target: target process
 *	unwind: If this is unwinding a failed kfd_dbg_trap_enable()
 *	unwind_count:
 *		If unwind == true, how far down the pdd list we need
 *		to unwind
 *		else: ignored
 */
void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind_count)
{
	int i;

	if (!unwind) {
		uint32_t flags = 0;
		int resume_count = resume_queues(target, 0, NULL);

		if (resume_count)
			pr_debug("Resumed %d queues\n", resume_count);

		cancel_work_sync(&target->debug_event_workarea);
		kfd_dbg_clear_process_address_watch(target);
		kfd_dbg_trap_set_wave_launch_mode(target, 0);

		kfd_dbg_trap_set_flags(target, &flags);
	}

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		/* If this is an unwind, and we have unwound the required
		 * enable calls on the pdd list, we need to stop now
		 * otherwise we may mess up another debugger session.
		 */
		if (unwind && i == unwind_count)
			break;

		kfd_process_set_trap_debug_flag(&pdd->qpd, false);

		/* GFX off is already disabled by debug activate if not RLC restore supported. */
		if (kfd_dbg_is_rlc_restore_supported(pdd->dev))
			amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
		pdd->spi_dbg_override =
				pdd->dev->kfd2kgd->disable_debug_trap(
				pdd->dev->adev,
				target->runtime_info.ttmp_setup,
				pdd->dev->vm_info.last_vmid_kfd);
		amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

		if (!kfd_dbg_is_per_vmid_supported(pdd->dev) &&
				release_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd))
			pr_err("Failed to release debug vmid on [%i]\n", pdd->dev->id);

		if (!pdd->dev->kfd->shared_resources.enable_mes)
			debug_refresh_runlist(pdd->dev->dqm);
		else
			kfd_dbg_set_mes_debug_mode(pdd);
	}

	kfd_dbg_set_workaround(target, false);
}

static void kfd_dbg_clean_exception_status(struct kfd_process *target)
{
	struct process_queue_manager *pqm;
	struct process_queue_node *pqn;
	int i;

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		kfd_process_drain_interrupts(pdd);

		pdd->exception_status = 0;
	}

	pqm = &target->pqm;
	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (!pqn->q)
			continue;

		pqn->q->properties.exception_status = 0;
	}

	target->exception_status = 0;
}

int kfd_dbg_trap_disable(struct kfd_process *target)
{
	if (!target->debug_trap_enabled)
		return 0;

	/*
	 * Defer deactivation to runtime if the runtime is not enabled;
	 * otherwise reset the attached running target's runtime state to
	 * enabled so that it can be re-attached.
	 */
	if (target->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED)
		kfd_dbg_trap_deactivate(target, false, 0);
	else if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
		target->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;

	fput(target->dbg_ev_file);
	target->dbg_ev_file = NULL;

	if (target->debugger_process) {
		atomic_dec(&target->debugger_process->debugged_process_count);
		target->debugger_process = NULL;
	}

	target->debug_trap_enabled = false;
	kfd_dbg_clean_exception_status(target);
	kfd_unref_process(target);

	return 0;
}

int kfd_dbg_trap_activate(struct kfd_process *target)
{
	int i, r = 0;

	r = kfd_dbg_set_workaround(target, true);
	if (r)
		return r;

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		if (!kfd_dbg_is_per_vmid_supported(pdd->dev)) {
			r = reserve_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd);

			if (r) {
				target->runtime_info.runtime_state = (r == -EBUSY) ?
					DEBUG_RUNTIME_STATE_ENABLED_BUSY :
					DEBUG_RUNTIME_STATE_ENABLED_ERROR;

				goto unwind_err;
			}
		}

		/* Disable GFX OFF to prevent garbage read/writes to debug registers.
		 * If RLC restore of debug registers is not supported and runtime enable
		 * hasn't done so already on ttmp setup request, restore the trap config registers.
		 *
		 * If RLC restore of debug registers is not supported, keep gfx off disabled for
		 * the debug session.
		 */
		amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
		if (!(kfd_dbg_is_rlc_restore_supported(pdd->dev) ||
						target->runtime_info.ttmp_setup))
			pdd->dev->kfd2kgd->enable_debug_trap(pdd->dev->adev, true,
							pdd->dev->vm_info.last_vmid_kfd);

		pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap(
					pdd->dev->adev,
					false,
					pdd->dev->vm_info.last_vmid_kfd);

		if (kfd_dbg_is_rlc_restore_supported(pdd->dev))
			amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

		/*
		 * Setting the debug flag in the trap handler requires that the TMA has been
		 * allocated, which occurs during CWSR initialization.
		 * In the event that CWSR has not been initialized at this point, the flag will
		 * be set again during CWSR initialization if the target process is still debug
		 * enabled.
		 */
		kfd_process_set_trap_debug_flag(&pdd->qpd, true);

		if (!pdd->dev->kfd->shared_resources.enable_mes)
			r = debug_refresh_runlist(pdd->dev->dqm);
		else
			r = kfd_dbg_set_mes_debug_mode(pdd);

		if (r) {
			target->runtime_info.runtime_state =
					DEBUG_RUNTIME_STATE_ENABLED_ERROR;
			goto unwind_err;
		}
	}

	return 0;

unwind_err:
	/* Enabling debug failed, we need to disable on
	 * all GPUs so the enable is all or nothing.
	 */
	kfd_dbg_trap_deactivate(target, true, i);
	return r;
}

int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd,
			void __user *runtime_info, uint32_t *runtime_size)
{
	struct file *f;
	uint32_t copy_size;
	int i, r = 0;

	if (target->debug_trap_enabled)
		return -EALREADY;

	/* Enable pre-checks */
	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		if (!KFD_IS_SOC15(pdd->dev))
			return -ENODEV;

		if (!kfd_dbg_has_gws_support(pdd->dev) && pdd->qpd.num_gws)
			return -EBUSY;
	}

	copy_size = min((size_t)(*runtime_size), sizeof(target->runtime_info));

	f = fget(fd);
	if (!f) {
		pr_err("Failed to get file for (%i)\n", fd);
		return -EBADF;
	}

	target->dbg_ev_file = f;

	/* defer activation to runtime if not runtime enabled */
	if (target->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED)
		kfd_dbg_trap_activate(target);

	/* We already hold the process reference but hold another one for the
	 * debug session.
	 */
	kref_get(&target->ref);
	target->debug_trap_enabled = true;

	if (target->debugger_process)
		atomic_inc(&target->debugger_process->debugged_process_count);

	if (copy_to_user(runtime_info, (void *)&target->runtime_info, copy_size)) {
		kfd_dbg_trap_deactivate(target, false, 0);
		r = -EFAULT;
	}

	*runtime_size = sizeof(target->runtime_info);

	return r;
}

static int kfd_dbg_validate_trap_override_request(struct kfd_process *p,
						  uint32_t trap_override,
						  uint32_t trap_mask_request,
						  uint32_t *trap_mask_supported)
{
	int i = 0;

	*trap_mask_supported = 0xffffffff;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];
		int err = pdd->dev->kfd2kgd->validate_trap_override_request(
				pdd->dev->adev,
				trap_override,
				trap_mask_supported);

		if (err)
			return err;
	}

	if (trap_mask_request & ~*trap_mask_supported)
		return -EACCES;

	return 0;
}

int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target,
					  uint32_t trap_override,
					  uint32_t trap_mask_bits,
					  uint32_t trap_mask_request,
					  uint32_t *trap_mask_prev,
					  uint32_t *trap_mask_supported)
{
	int r = 0, i;

	r = kfd_dbg_validate_trap_override_request(target,
						   trap_override,
						   trap_mask_request,
						   trap_mask_supported);

	if (r)
		return r;

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
		pdd->spi_dbg_override = pdd->dev->kfd2kgd->set_wave_launch_trap_override(
				pdd->dev->adev,
				pdd->dev->vm_info.last_vmid_kfd,
				trap_override,
				trap_mask_bits,
				trap_mask_request,
				trap_mask_prev,
				pdd->spi_dbg_override);
		amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

		if (!pdd->dev->kfd->shared_resources.enable_mes)
			r = debug_refresh_runlist(pdd->dev->dqm);
		else
			r = kfd_dbg_set_mes_debug_mode(pdd);

		if (r)
			break;
	}

	return r;
}

int kfd_dbg_trap_set_wave_launch_mode(struct kfd_process *target,
				      uint8_t wave_launch_mode)
{
	int r = 0, i;

	if (wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL &&
			wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT &&
			wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG)
		return -EINVAL;

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
		pdd->spi_dbg_launch_mode = pdd->dev->kfd2kgd->set_wave_launch_mode(
				pdd->dev->adev,
				wave_launch_mode,
				pdd->dev->vm_info.last_vmid_kfd);
		amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

		if (!pdd->dev->kfd->shared_resources.enable_mes)
			r = debug_refresh_runlist(pdd->dev->dqm);
		else
			r = kfd_dbg_set_mes_debug_mode(pdd);

		if (r)
			break;
	}

	return r;
}

int kfd_dbg_trap_query_exception_info(struct kfd_process *target,
				      uint32_t source_id,
				      uint32_t exception_code,
				      bool clear_exception,
				      void __user *info,
				      uint32_t *info_size)
{
	bool found = false;
	int r = 0;
	uint32_t copy_size, actual_info_size = 0;
	uint64_t *exception_status_ptr = NULL;

	if (!target)
		return -EINVAL;

	if (!info || !info_size)
		return -EINVAL;

	mutex_lock(&target->event_mutex);

	if (KFD_DBG_EC_TYPE_IS_QUEUE(exception_code)) {
		/* Per queue exceptions */
		struct queue *queue = NULL;
		int i;

		for (i = 0; i < target->n_pdds; i++) {
			struct kfd_process_device *pdd = target->pdds[i];
			struct qcm_process_device *qpd = &pdd->qpd;

			list_for_each_entry(queue, &qpd->queues_list, list) {
				if (!found && queue->properties.queue_id == source_id) {
					found = true;
					break;
				}
			}
			if (found)
				break;
		}

		if (!found) {
			r = -EINVAL;
			goto out;
		}

		if (!(queue->properties.exception_status & KFD_EC_MASK(exception_code))) {
			r = -ENODATA;
			goto out;
		}
		exception_status_ptr = &queue->properties.exception_status;
	} else if (KFD_DBG_EC_TYPE_IS_DEVICE(exception_code)) {
		/* Per device exceptions */
		struct kfd_process_device *pdd = NULL;
		int i;

		for (i = 0; i < target->n_pdds; i++) {
			pdd = target->pdds[i];
			if (pdd->dev->id == source_id) {
				found = true;
				break;
			}
		}

		if (!found) {
			r = -EINVAL;
			goto out;
		}

		if (!(pdd->exception_status & KFD_EC_MASK(exception_code))) {
			r = -ENODATA;
			goto out;
		}

		if (exception_code == EC_DEVICE_MEMORY_VIOLATION) {
			copy_size = min((size_t)(*info_size), pdd->vm_fault_exc_data_size);

			if (copy_to_user(info, pdd->vm_fault_exc_data, copy_size)) {
				r = -EFAULT;
				goto out;
			}
			actual_info_size = pdd->vm_fault_exc_data_size;
			if (clear_exception) {
				kfree(pdd->vm_fault_exc_data);
				pdd->vm_fault_exc_data = NULL;
				pdd->vm_fault_exc_data_size = 0;
			}
		}
		exception_status_ptr = &pdd->exception_status;
	} else if (KFD_DBG_EC_TYPE_IS_PROCESS(exception_code)) {
		/* Per process exceptions */
		if (!(target->exception_status & KFD_EC_MASK(exception_code))) {
			r = -ENODATA;
			goto out;
		}

		if (exception_code == EC_PROCESS_RUNTIME) {
			copy_size = min((size_t)(*info_size), sizeof(target->runtime_info));

			if (copy_to_user(info, (void *)&target->runtime_info, copy_size)) {
				r = -EFAULT;
				goto out;
			}

			actual_info_size = sizeof(target->runtime_info);
		}

		exception_status_ptr = &target->exception_status;
	} else {
		pr_debug("Bad exception type [%i]\n", exception_code);
		r = -EINVAL;
		goto out;
	}

	*info_size = actual_info_size;
	if (clear_exception)
		*exception_status_ptr &= ~KFD_EC_MASK(exception_code);
out:
	mutex_unlock(&target->event_mutex);
	return r;
}

int kfd_dbg_trap_device_snapshot(struct kfd_process *target,
				 uint64_t exception_clear_mask,
				 void __user *user_info,
				 uint32_t *number_of_device_infos,
				 uint32_t *entry_size)
{
	struct kfd_dbg_device_info_entry device_info;
	uint32_t tmp_entry_size = *entry_size, tmp_num_devices;
	int i, r = 0;

	if (!(target && user_info && number_of_device_infos && entry_size))
		return -EINVAL;

	tmp_num_devices = min_t(size_t, *number_of_device_infos, target->n_pdds);
	*number_of_device_infos = target->n_pdds;
	*entry_size = min_t(size_t, *entry_size, sizeof(device_info));

	if (!tmp_num_devices)
		return 0;

	memset(&device_info, 0, sizeof(device_info));

	mutex_lock(&target->event_mutex);

	/* Run over all pdd of the process */
	for (i = 0; i < tmp_num_devices; i++) {
		struct kfd_process_device *pdd = target->pdds[i];
		struct kfd_topology_device *topo_dev = kfd_topology_device_by_id(pdd->dev->id);

		device_info.gpu_id = pdd->dev->id;
		device_info.exception_status = pdd->exception_status;
		device_info.lds_base = pdd->lds_base;
		device_info.lds_limit = pdd->lds_limit;
		device_info.scratch_base = pdd->scratch_base;
		device_info.scratch_limit = pdd->scratch_limit;
		device_info.gpuvm_base = pdd->gpuvm_base;
		device_info.gpuvm_limit = pdd->gpuvm_limit;
		device_info.location_id = topo_dev->node_props.location_id;
		device_info.vendor_id = topo_dev->node_props.vendor_id;
		device_info.device_id = topo_dev->node_props.device_id;
		device_info.revision_id = pdd->dev->adev->pdev->revision;
		device_info.subsystem_vendor_id = pdd->dev->adev->pdev->subsystem_vendor;
		device_info.subsystem_device_id = pdd->dev->adev->pdev->subsystem_device;
		device_info.fw_version = pdd->dev->kfd->mec_fw_version;
		device_info.gfx_target_version =
			topo_dev->node_props.gfx_target_version;
		device_info.simd_count = topo_dev->node_props.simd_count;
		device_info.max_waves_per_simd =
			topo_dev->node_props.max_waves_per_simd;
		device_info.array_count = topo_dev->node_props.array_count;
		device_info.simd_arrays_per_engine =
			topo_dev->node_props.simd_arrays_per_engine;
		device_info.num_xcc = NUM_XCC(pdd->dev->xcc_mask);
		device_info.capability = topo_dev->node_props.capability;
		device_info.debug_prop = topo_dev->node_props.debug_prop;

		if (exception_clear_mask)
			pdd->exception_status &= ~exception_clear_mask;

		if (copy_to_user(user_info, &device_info, *entry_size)) {
			r = -EFAULT;
			break;
		}

		user_info += tmp_entry_size;
	}

	mutex_unlock(&target->event_mutex);

	return r;
}

void kfd_dbg_set_enabled_debug_exception_mask(struct kfd_process *target,
					      uint64_t exception_set_mask)
{
	uint64_t found_mask = 0;
	struct process_queue_manager *pqm;
	struct process_queue_node *pqn;
	static const char write_data = '.';
	loff_t pos = 0;
	int i;

	mutex_lock(&target->event_mutex);

	found_mask |= target->exception_status;

	pqm = &target->pqm;
	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (!pqn->q)
			continue;

		found_mask |= pqn->q->properties.exception_status;
	}

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		found_mask |= pdd->exception_status;
	}

	if (exception_set_mask & found_mask)
		kernel_write(target->dbg_ev_file, &write_data, 1, &pos);

	target->exception_enable_mask = exception_set_mask;

	mutex_unlock(&target->event_mutex);
}