/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"

/*
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring	*ring;
	ktime_t			start_timestamp;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}
/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops ||
	    __f->base.ops == &amdgpu_job_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @job: job the fence is embedded in
 * @flags: flags to pass into the subordinate .emit_fence() call
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
		      unsigned int flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence;
	struct amdgpu_fence *am_fence;
	struct dma_fence __rcu **ptr;
	uint32_t seq;
	int r;

	if (job == NULL) {
		/* create a separate hw fence */
		am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
		if (am_fence == NULL)
			return -ENOMEM;
		fence = &am_fence->base;
		am_fence->ring = ring;
	} else {
		/* use the job-embedded fence */
		fence = &job->hw_fence;
	}

	seq = ++ring->fence_drv.sync_seq;
	if (job && job->job_run_counter) {
		/* reinit seq for resubmitted jobs */
		fence->seqno = seq;
		/* to be consistent with external fence creation and other drivers */
		dma_fence_get(fence);
	} else {
		if (job) {
			dma_fence_init(fence, &amdgpu_job_fence_ops,
				       &ring->fence_drv.lock,
				       adev->fence_context + ring->idx, seq);
			/* Against remove in amdgpu_job_{free, free_cb} */
			dma_fence_get(fence);
		} else {
			dma_fence_init(fence, &amdgpu_fence_ops,
				       &ring->fence_drv.lock,
				       adev->fence_context + ring->idx, seq);
		}
	}

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);
	pm_runtime_get_noresume(adev_to_drm(adev)->dev);
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
		struct dma_fence *old;

		rcu_read_lock();
		old = dma_fence_get_rcu_safe(ptr);
		rcu_read_unlock();

		if (old) {
			r = dma_fence_wait(old, false);
			dma_fence_put(old);
			if (r)
				return r;
		}
	}

	to_amdgpu_fence(fence)->start_timestamp = ktime_get();

	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	rcu_assign_pointer(*ptr, dma_fence_get(fence));

	*f = fence;

	return 0;
}
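
/*
 * Typical usage of amdgpu_fence_emit() for a standalone (non-job) fence,
 * shown as an informal sketch rather than an actual in-tree caller: the
 * fence is emitted after the ring commands it covers and can then be waited
 * on and released through the generic dma_fence API.
 *
 *	struct dma_fence *fence;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, &fence, NULL, 0);
 *	if (!r) {
 *		r = dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 *
 * Job-embedded fences pass the job as @job instead and are owned by the
 * scheduler via amdgpu_job, so they are not normally waited on here.
 */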

/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 * @timeout: the timeout for waiting in usecs
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for fence polling.
 * Returns 0 on success, -EINVAL or -ETIMEDOUT on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout)
{
	uint32_t seq;
	signed long r;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	r = amdgpu_fence_wait_polling(ring,
				      seq - ring->fence_drv.num_fences_mask,
				      timeout);
	if (r < 1)
		return -ETIMEDOUT;

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Reads the current fence value and signals all fences emitted
 * up to that sequence number.
 *
 * Returns true if the sequence number has advanced and fences
 * were processed, false otherwise.
 */
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct amdgpu_device *adev = ring->adev;
	uint32_t seq, last_seq;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (del_timer(&ring->fence_drv.fallback_timer) &&
	    seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return false;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		dma_fence_signal(fence);
		dma_fence_put(fence);
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	} while (last_seq != seq);

	return true;
}
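
/*
 * Example of the slot walk above (informal): with num_fences_mask == 0xff,
 * last_seq == 0x101 and seq == 0x104 are first masked down to 0x01 and 0x04,
 * and the loop then signals and releases the fences in slots 0x02, 0x03 and
 * 0x04.  The window never exceeds num_fences_mask entries because
 * amdgpu_fence_emit() waits for any old fence still occupying a slot before
 * reusing it.
 */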

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the pointer to ring structure
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	if (amdgpu_fence_process(ring))
		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-wait until the given sequence number has signaled (all asics).
 * Returns the remaining timeout if the fence signaled in time, 0 otherwise.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}
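
/*
 * Worked example of the wrap-safe count above (informal): if the hardware has
 * signaled up to last_seq == 0xfffffffe while sync_seq has already wrapped to
 * 0x00000002, then emitted = 0x100000000 - 0xfffffffe + 0x00000002 = 4, i.e.
 * fences 0xffffffff, 0x0, 0x1 and 0x2 are still outstanding.
 */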

/**
 * amdgpu_fence_last_unsignaled_time_us - time since the earliest unsignaled fence was emitted
 * @ring: ring the fence is associated with
 *
 * Find the earliest fence that has not signaled yet and return the time
 * delta, in microseconds, between its emission and now.
 */
u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct dma_fence *fence;
	uint32_t last_seq, sync_seq;

	last_seq = atomic_read(&ring->fence_drv.last_seq);
	sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
	if (last_seq == sync_seq)
		return 0;

	++last_seq;
	last_seq &= drv->num_fences_mask;
	fence = drv->fences[last_seq];
	if (!fence)
		return 0;

	return ktime_us_delta(ktime_get(),
			      to_amdgpu_fence(fence)->start_timestamp);
}

/**
 * amdgpu_fence_update_start_timestamp - update the start timestamp of the fence
 * @ring: ring the fence is associated with
 * @seq: the fence seq number to update.
 * @timestamp: the start timestamp to update.
 *
 * Called when the fence and its related ib are about to be resubmitted to
 * the GPU in the MCBP scenario, so a race with amdgpu_fence_process()
 * modifying the same fence does not need to be considered.
 */
void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, ktime_t timestamp)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct dma_fence *fence;

	seq &= drv->num_fences_mask;
	fence = drv->fences[seq];
	if (!fence)
		return;

	to_amdgpu_fence(fence)->start_timestamp = timestamp;
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned int irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
		ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
		      ring->name, ring->fence_drv.gpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (!adev)
		return -EINVAL;

	if (!is_power_of_2(ring->num_hw_submission))
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);

	if (!ring->fence_drv.fences)
		return -ENOMEM;

	return 0;
}
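
/*
 * Sizing example for the fence slot array above (informal): with
 * num_hw_submission == 128 the fences array holds 256 pointers and
 * num_fences_mask == 0xff, so a newly emitted fence only has to wait in
 * amdgpu_fence_emit() if the fence emitted 256 sequence numbers earlier has
 * not been processed yet.
 */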

/**
 * amdgpu_fence_driver_sw_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
{
	return 0;
}

/**
 * amdgpu_fence_driver_hw_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* You can't wait for HW to signal if it's gone */
		if (!drm_dev_is_unplugged(adev_to_drm(adev)))
			r = amdgpu_fence_wait_empty(ring);
		else
			r = -ENODEV;
		/* no need to trigger GPU reset as we are unloading */
		if (r)
			amdgpu_fence_driver_force_completion(ring);

		if (ring->fence_drv.irq_src)
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);

		del_timer_sync(&ring->fence_drv.fallback_timer);
	}
}

/* Will either stop and flush handlers for amdgpu interrupt or re-enable it */
void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src)
			continue;

		if (stop)
			disable_irq(adev->irq.irq);
		else
			enable_irq(adev->irq.irq);
	}
}

void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
{
	unsigned int i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/*
		 * Notice we check for sched.ops since there's some
		 * override on the meaning of sched.ready by amdgpu.
		 * The natural check would be sched.ready, which is
		 * set as drm_sched_init() finishes...
		 */
		if (ring->sched.ops)
			drm_sched_fini(&ring->sched);

		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_hw_init - enable the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Enable the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		if (ring->fence_drv.irq_src)
			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
 *
 * @ring: ring whose job embedded fences will be cleared
 *
 */
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
{
	int i;
	struct dma_fence *old, **ptr;

	for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
		ptr = &ring->fence_drv.fences[i];
		old = rcu_dereference_protected(*ptr, 1);
		if (old && old->ops == &amdgpu_job_fence_ops) {
			struct amdgpu_job *job;

			/* For a non-scheduler bad job, i.e. a failed ib test, we
			 * need to signal it right here or we won't be able to
			 * track it in fence_drv and it will remain unsignaled
			 * during sa_bo free.
			 */
			job = container_of(old, struct amdgpu_job, hw_fence);
			if (!job->base.s_fence && !dma_fence_is_signaled(old))
				dma_fence_signal(old);
			RCU_INIT_POINTER(*ptr, NULL);
			dma_fence_put(old);
		}
	}
}

/**
 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
 *
 * @ring: ring whose latest fence will be force-signaled
 *
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	return (const char *)to_amdgpu_fence(f)->ring->name;
}

static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

	return (const char *)to_amdgpu_ring(job->base.sched)->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * Called with the fence lock held.  Arms the fallback timer, if it is not
 * already pending, so the fence is guaranteed to be processed even if the
 * fence interrupt is lost.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);

	return true;
}

/**
 * amdgpu_job_fence_enable_signaling - enable signalling on job fence
 * @f: fence
 *
 * Similar to amdgpu_fence_enable_signaling() above, but handles only the
 * job embedded fence.
 */
static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

	if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

	/* free the separately allocated hw fence back to the slab */
	kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
}

/**
 * amdgpu_job_fence_free - free up the job with embedded fence
 *
 * @rcu: RCU callback head
 *
 * Free up the job with embedded fence after the RCU grace period.
 */
static void amdgpu_job_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

	/* free the job the hw fence is embedded in */
	kfree(container_of(f, struct amdgpu_job, hw_fence));
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

/**
 * amdgpu_job_fence_release - callback that job embedded fence can be freed
 *
 * @f: fence
 *
 * Similar to amdgpu_fence_release() above, but handles only the
 * job embedded fence.
 */
static void amdgpu_job_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_job_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.release = amdgpu_fence_release,
};

static const struct dma_fence_ops amdgpu_job_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_job_fence_get_timeline_name,
	.enable_signaling = amdgpu_job_fence_enable_signaling,
	.release = amdgpu_job_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
				   le32_to_cpu(*ring->trail_fence_cpu_addr));
			seq_printf(m, "Last emitted 0x%08x\n",
				   ring->trail_seq);
		}

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}

/*
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a gpu reset and wait for the recovery to complete.
 */
static int gpu_recover_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	struct drm_device *dev = adev_to_drm(adev);
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return 0;
	}

	if (amdgpu_reset_domain_schedule(adev->reset_domain, &adev->reset_work))
		flush_work(&adev->reset_work);

	*val = atomic_read(&adev->reset_domain->reset_res);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
			 "%lld\n");

static void amdgpu_debugfs_reset_work(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  reset_work);

	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}

#endif

void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
			    &amdgpu_debugfs_fence_info_fops);

	if (!amdgpu_sriov_vf(adev)) {

		INIT_WORK(&adev->reset_work, amdgpu_debugfs_reset_work);
		debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
				    &amdgpu_debugfs_gpu_recover_fops);
	}
#endif
}