/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring *ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops ||
	    __f->base.ops == &amdgpu_job_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}
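
/*
 * Note on sequence numbers: each fence carries a 32-bit, monotonically
 * increasing sequence number that the GPU writes back to fence_drv.cpu_addr
 * once the commands preceding the fence have completed.  Comparisons in the
 * rest of this file are written to tolerate 32-bit wrap-around, e.g. the
 * signed-difference test in amdgpu_fence_wait_polling():
 *
 *	(int32_t)(wait_seq - seq) > 0	// wait_seq is still ahead of seq
 */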

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @job: job the fence is embedded in
 * @flags: flags to pass into the subordinate .emit_fence() call
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, negative error code on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
		      unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence;
	struct amdgpu_fence *am_fence;
	struct dma_fence __rcu **ptr;
	uint32_t seq;
	int r;

	if (job == NULL) {
		/* create a separate hw fence */
		am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
		if (am_fence == NULL)
			return -ENOMEM;
		fence = &am_fence->base;
		am_fence->ring = ring;
	} else {
		/* use the job-embedded fence */
		fence = &job->hw_fence;
	}

	seq = ++ring->fence_drv.sync_seq;
	if (job && job->job_run_counter) {
		/* reinit seq for resubmitted jobs */
		fence->seqno = seq;
		/* To be in line with external fence creation and other drivers */
		dma_fence_get(fence);
	} else {
		if (job) {
			dma_fence_init(fence, &amdgpu_job_fence_ops,
				       &ring->fence_drv.lock,
				       adev->fence_context + ring->idx, seq);
			/* take a reference against removal in amdgpu_job_{free, free_cb} */
			dma_fence_get(fence);
		} else {
			dma_fence_init(fence, &amdgpu_fence_ops,
				       &ring->fence_drv.lock,
				       adev->fence_context + ring->idx, seq);
		}
	}

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);
	pm_runtime_get_noresume(adev_to_drm(adev)->dev);
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
		struct dma_fence *old;

		rcu_read_lock();
		old = dma_fence_get_rcu_safe(ptr);
		rcu_read_unlock();

		if (old) {
			r = dma_fence_wait(old, false);
			dma_fence_put(old);
			if (r)
				return r;
		}
	}

	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	rcu_assign_pointer(*ptr, dma_fence_get(fence));

	*f = fence;

	return 0;
}
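
/*
 * Illustrative sketch only (not taken from this file's callers): a typical
 * user reserves ring space, emits its command packets, and emits the fence
 * last so the fence only signals once everything before it has executed.
 * Error handling is omitted for brevity.
 *
 *	struct dma_fence *fence;
 *	int r;
 *
 *	r = amdgpu_ring_alloc(ring, ndw);		// reserve ring space
 *	// ... emit command packets on the ring ...
 *	r = amdgpu_fence_emit(ring, &fence, NULL, 0);	// standalone hw fence
 *	amdgpu_ring_commit(ring);
 *	r = dma_fence_wait(fence, false);		// or hand the fence off
 *	dma_fence_put(fence);
 */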

/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 * @timeout: the timeout for waiting in usecs
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for polling fences.
 * Returns 0 on success, negative error code on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout)
{
	uint32_t seq;
	signed long r;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	r = amdgpu_fence_wait_polling(ring,
				      seq - ring->fence_drv.num_fences_mask,
				      timeout);
	if (r < 1)
		return -ETIMEDOUT;

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 *
 * Returns true if any fences were processed.
 */
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct amdgpu_device *adev = ring->adev;
	uint32_t seq, last_seq;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (del_timer(&ring->fence_drv.fallback_timer) &&
	    seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return false;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		dma_fence_signal(fence);
		dma_fence_put(fence);
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	} while (last_seq != seq);

	return true;
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the pointer to ring structure
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	if (amdgpu_fence_process(ring))
		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}
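
/*
 * Fence completion is normally driven by the ring's fence interrupt calling
 * amdgpu_fence_process().  The fallback timer above is armed whenever
 * signaling is enabled on a fence and simply re-runs amdgpu_fence_process()
 * after AMDGPU_FENCE_JIFFIES_TIMEOUT; if that run finds fences that completed
 * in hardware but were never signalled, an interrupt was most likely missed
 * and the DRM_WARN above is printed.
 */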

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fences are associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy wait for the requested sequence number to signal (all asics).
 * Returns the remaining timeout if the sequence number signalled in time,
 * 0 otherwise.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
		ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
		      ring->name, ring->fence_drv.gpu_addr);
	return 0;
}
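
/*
 * The per-ring bookkeeping set up below sizes the fence_drv.fences slot array
 * to twice the number of possible hardware submissions (num_hw_submission),
 * with num_fences_mask = num_hw_submission * 2 - 1 mapping a sequence number
 * to a slot.  As a result amdgpu_fence_emit() only has to wait for a slot's
 * previous occupant when the ring has run a full slot array ahead of the last
 * signalled fence.
 */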

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (!adev)
		return -EINVAL;

	if (!is_power_of_2(ring->num_hw_submission))
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);

	if (!ring->fence_drv.fences)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_fence_driver_sw_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
{
	return 0;
}

/**
 * amdgpu_fence_driver_hw_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* You can't wait for HW to signal if it's gone */
		if (!drm_dev_is_unplugged(adev_to_drm(adev)))
			r = amdgpu_fence_wait_empty(ring);
		else
			r = -ENODEV;
		/* no need to trigger GPU reset as we are unloading */
		if (r)
			amdgpu_fence_driver_force_completion(ring);

		if (ring->fence_drv.irq_src)
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);

		del_timer_sync(&ring->fence_drv.fallback_timer);
	}
}

/* Will either stop and flush handlers for the amdgpu interrupt or re-enable it */
void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src)
			continue;

		if (stop)
			disable_irq(adev->irq.irq);
		else
			enable_irq(adev->irq.irq);
	}
}

void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
{
	unsigned int i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		if (!ring->no_scheduler)
			drm_sched_fini(&ring->sched);

		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}
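
/*
 * Teardown is split in two stages: amdgpu_fence_driver_hw_fini() above waits
 * for (or force-completes) outstanding fences and releases the fence
 * interrupts while the hardware may still be reachable, and
 * amdgpu_fence_driver_sw_fini() then tears down the schedulers and frees the
 * fence slot arrays.  The device teardown path is expected to call the hw
 * variant before the sw variant.
 */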

/**
 * amdgpu_fence_driver_hw_init - enable the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Enable the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		if (ring->fence_drv.irq_src)
			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
 *
 * @ring: ring whose job-embedded fences will be cleared
 *
 */
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
{
	int i;
	struct dma_fence *old, **ptr;

	for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
		ptr = &ring->fence_drv.fences[i];
		old = rcu_dereference_protected(*ptr, 1);
		if (old && old->ops == &amdgpu_job_fence_ops) {
			RCU_INIT_POINTER(*ptr, NULL);
			dma_fence_put(old);
		}
	}
}

/**
 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
 *
 * @ring: ring whose latest fence will be force-signalled
 *
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	return (const char *)to_amdgpu_fence(f)->ring->name;
}

static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

	return (const char *)to_amdgpu_ring(job->base.sched)->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);

	return true;
}

/**
 * amdgpu_job_fence_enable_signaling - enable signalling on job fence
 * @f: fence
 *
 * This is similar to amdgpu_fence_enable_signaling above, but it
 * only handles the job-embedded fence.
 */
static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

	if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));

	return true;
}
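
/*
 * There are two flavours of hardware fence, distinguished by their dma_fence
 * ops: standalone fences allocated from amdgpu_fence_slab (amdgpu_fence_ops)
 * and fences embedded in an amdgpu_job (amdgpu_job_fence_ops).  The release
 * paths below differ accordingly: the former is returned to the slab, while
 * the latter frees the enclosing job once the fence's refcount drops to zero.
 */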

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

	/* free the fence to the slab, as it is a standalone (non-job) fence */
	kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
}

/**
 * amdgpu_job_fence_free - free up the job with embedded fence
 *
 * @rcu: RCU callback head
 *
 * Free up the job with embedded fence after the RCU grace period.
 */
static void amdgpu_job_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

	/* the fence is embedded in a job, so free the enclosing job */
	kfree(container_of(f, struct amdgpu_job, hw_fence));
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

/**
 * amdgpu_job_fence_release - callback that job embedded fence can be freed
 *
 * @f: fence
 *
 * This is similar to amdgpu_fence_release above, but it
 * only handles the job-embedded fence.
 */
static void amdgpu_job_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_job_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.release = amdgpu_fence_release,
};

static const struct dma_fence_ops amdgpu_job_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_job_fence_get_timeline_name,
	.enable_signaling = amdgpu_job_fence_enable_signaling,
	.release = amdgpu_job_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
				   le32_to_cpu(*ring->trail_fence_cpu_addr));
			seq_printf(m, "Last emitted 0x%08x\n",
				   ring->trail_seq);
		}

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}
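
/*
 * With debugfs mounted, the file created below as "amdgpu_fence_info"
 * (typically under /sys/kernel/debug/dri/<minor>/) dumps the last signalled
 * and last emitted sequence numbers per ring, which is handy for spotting a
 * ring that has stopped retiring fences.
 */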

/*
 * amdgpu_debugfs_gpu_recover - manually trigger a GPU reset & recover
 *
 * Reading this file manually triggers a GPU reset and reports the result
 * of the recovery.
 */
static int gpu_recover_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	struct drm_device *dev = adev_to_drm(adev);
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return 0;
	}

	if (amdgpu_reset_domain_schedule(adev->reset_domain, &adev->reset_work))
		flush_work(&adev->reset_work);

	*val = atomic_read(&adev->reset_domain->reset_res);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
			 "%lld\n");

static void amdgpu_debugfs_reset_work(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  reset_work);
	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}

#endif

void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
			    &amdgpu_debugfs_fence_info_fops);

	if (!amdgpu_sriov_vf(adev)) {
		INIT_WORK(&adev->reset_work, amdgpu_debugfs_reset_work);
		debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
				    &amdgpu_debugfs_gpu_recover_fops);
	}
#endif
}