/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = lower_32_bits(atomic64_read(&drv->last_seq));

	return seq;
}

/**
 * amdgpu_fence_schedule_check - schedule lockup check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Queues a delayed work item to check for lockups.
 */
static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &ring->fence_drv.lockup_work,
			   AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @owner: creator of the fence
 * @fence: amdgpu fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
	if ((*fence) == NULL)
		return -ENOMEM;

	(*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
	(*fence)->ring = ring;
	(*fence)->owner = owner;
	fence_init(&(*fence)->base, &amdgpu_fence_ops,
		   &ring->fence_drv.fence_queue.lock,
		   adev->fence_context + ring->idx,
		   (*fence)->seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       (*fence)->seq,
			       AMDGPU_FENCE_FLAG_INT);
	trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
	return 0;
}
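
/*
 * Example (editor's illustrative sketch, not driver code): a typical
 * emit-then-wait cycle as seen from a submission path.  Assumes the
 * caller already holds the ring emission mutex, as amdgpu_fence_emit()
 * requires; "owner" stands for whatever context pointer the caller uses.
 *
 *	struct amdgpu_fence *fence;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, owner, &fence);
 *	if (r)
 *		return r;			// -ENOMEM, allocation failed
 *	r = amdgpu_fence_wait(fence, true);	// interruptible wait
 *	amdgpu_fence_unref(&fence);
 */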

/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also
 * used for the fence locking itself, so unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
	struct amdgpu_fence *fence;
	struct amdgpu_device *adev;
	u64 seq;
	int ret;

	fence = container_of(wait, struct amdgpu_fence, fence_wake);
	adev = fence->ring->adev;

	/*
	 * We cannot use amdgpu_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
	if (seq >= fence->seq) {
		ret = fence_signal_locked(&fence->base);
		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");

		__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
		fence_put(&fence->base);
	} else
		FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}

/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring, and the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to go into an infinite loop, there need to
	 * be continuously new fences signaled, i.e. amdgpu_fence_read
	 * needs to return a different value each time for both the
	 * currently polling process and the other process that updates
	 * last_seq between the atomic read and xchg of the current
	 * process. And the value the other process sets as last_seq must
	 * be higher than the seq value we just read, which means that
	 * the current process needs to be interrupted after
	 * amdgpu_fence_read and before the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail after 10 loops, accepting the fact that we might have
	 * temporarily set last_seq not to the real last seq but to an
	 * older one.
	 */
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq[ring->idx];
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted)
			break;

		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped too many times; leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (seq < last_emitted)
		amdgpu_fence_schedule_check(ring);

	return wake;
}
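
/*
 * Worked example of the 32->64 bit sequence extension above (editor's
 * illustrative numbers only): suppose last_seq = 0x00000001fffffffe and
 * the hardware returns the 32-bit value 0x00000003.  OR-ing in the upper
 * bits of last_seq gives 0x0000000100000003, which is < last_seq, so the
 * lower word must have wrapped; re-extending with the upper bits of
 * last_emitted (say 0x0000000200000005) yields 0x0000000200000003, which
 * lies in (last_seq, last_emitted] and is therefore accepted as activity.
 */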

/**
 * amdgpu_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and if there is none, probes
 * the hardware to see if a lockup occurred.
 */
static void amdgpu_fence_check_lockup(struct work_struct *work)
{
	struct amdgpu_fence_driver *fence_drv;
	struct amdgpu_ring *ring;

	fence_drv = container_of(work, struct amdgpu_fence_driver,
				 lockup_work.work);
	ring = fence_drv->ring;

	if (!down_read_trylock(&ring->adev->exclusive_lock)) {
		/* just reschedule the check if a reset is going on */
		amdgpu_fence_schedule_check(ring);
		return;
	}

	if (amdgpu_fence_activity(ring)) {
		wake_up_all(&ring->fence_drv.fence_queue);
	} else if (amdgpu_ring_is_lockup(ring)) {
		/* good news we believe it's a lockup */
		dev_warn(ring->adev->dev, "GPU lockup (current fence id "
			 "0x%016llx last fence id 0x%016llx on ring %d)\n",
			 (uint64_t)atomic64_read(&fence_drv->last_seq),
			 fence_drv->sync_seq[ring->idx], ring->idx);

		/* remember that we need a reset */
		ring->adev->needs_reset = true;
		wake_up_all(&ring->fence_drv.fence_queue);
	}
	up_read(&ring->adev->exclusive_lock);
}

/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: ring the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;
	unsigned long irqflags;

	/* See the comment in amdgpu_fence_activity() for why this loop
	 * cannot realistically run forever and why we bail after 10
	 * iterations anyway: another process would have to keep updating
	 * last_seq between our amdgpu_fence_read and atomic xchg on every
	 * pass, each time with an ever higher value.
	 */
	spin_lock_irqsave(&ring->fence_lock, irqflags);
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq[ring->idx];
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted)
			break;

		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped too many times; leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (wake)
		wake_up_all(&ring->fence_drv.fence_queue);
	spin_unlock_irqrestore(&ring->fence_lock, irqflags);
}

/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * amdgpu_fence_ring_wait_seq_timeout().
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	amdgpu_fence_process(ring);
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	return false;
}

static bool amdgpu_fence_is_signaled(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;
	struct amdgpu_device *adev = ring->adev;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	if (down_read_trylock(&adev->exclusive_lock)) {
		amdgpu_fence_process(ring);
		up_read(&adev->exclusive_lock);

		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
			return true;
	}
	return false;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence_queue lock held, and adds a
 * callback to fence_queue that checks if this fence is signaled, and if
 * so it signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return false;

	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = amdgpu_fence_check_signaled;
	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
	fence_get(f);
	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
	return true;
}
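
/*
 * Putting the pieces above together (editor's summary, no new behavior):
 * the fence interrupt path calls amdgpu_fence_process(), which advances
 * last_seq and does wake_up_all() on fence_queue; the wait entry armed in
 * amdgpu_fence_enable_signaling() then runs amdgpu_fence_check_signaled(),
 * which signals the fence and removes itself from the queue.
 */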

/*
 * amdgpu_fence_ring_wait_seq_timeout - wait for seq of the specific ring to signal
 * @ring: ring to wait on for the seq number
 * @seq: seq number to wait for
 * @intr: if interruptible
 * @timeout: jiffies before timing out
 *
 * Return value:
 * 0: timed out but seq not signaled, and GPU not hung
 * X (X > 0): seq signaled, and X is the number of jiffies remaining before timeout
 * -EDEADLK: GPU hung before timeout
 * -ERESTARTSYS: interrupted before seq signaled
 * -EINVAL: some parameter is not valid
 */
static long amdgpu_fence_ring_wait_seq_timeout(struct amdgpu_ring *ring, uint64_t seq,
					       bool intr, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	long r = 0;
	bool signaled = false;

	BUG_ON(!ring);
	if (seq > ring->fence_drv.sync_seq[ring->idx])
		return -EINVAL;

	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return timeout;

	while (1) {
		if (intr) {
			r = wait_event_interruptible_timeout(ring->fence_drv.fence_queue, (
					(signaled = amdgpu_fence_seq_signaled(ring, seq))
					|| adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);

			if (r == -ERESTARTSYS) /* interrupted */
				return r;
		} else {
			r = wait_event_timeout(ring->fence_drv.fence_queue, (
					(signaled = amdgpu_fence_seq_signaled(ring, seq))
					|| adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
		}

		if (signaled) {
			/* seq signaled: report the remaining jiffies; the
			 * unused part (r) of this last wait interval still
			 * counts toward the budget
			 */
			if (timeout == MAX_SCHEDULE_TIMEOUT)
				return timeout;
			return timeout - AMDGPU_FENCE_JIFFIES_TIMEOUT + r;
		} else if (adev->needs_reset) {
			return -EDEADLK;
		}

		/* check if it's a lockup */
		if (amdgpu_ring_is_lockup(ring)) {
			uint64_t last_seq = atomic64_read(&ring->fence_drv.last_seq);
			/* ring lockup */
			dev_warn(adev->dev, "GPU lockup (waiting for "
				 "0x%016llx last fence id 0x%016llx on"
				 " ring %d)\n",
				 seq, last_seq, ring->idx);
			wake_up_all(&ring->fence_drv.fence_queue);
			return -EDEADLK;
		}

		if (timeout < MAX_SCHEDULE_TIMEOUT) {
			timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
			if (timeout < 1)
				return 0;
		}
	}
}
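
/*
 * Illustrative caller sketch (editor's example, not in the driver): how
 * the return values documented above are typically consumed.  The
 * -ETIMEDOUT mapping is a hypothetical choice by the caller.
 *
 *	long r = amdgpu_fence_ring_wait_seq_timeout(ring, seq, true,
 *						    MAX_SCHEDULE_TIMEOUT);
 *	if (r == -ERESTARTSYS)
 *		return r;		// interrupted by a signal
 *	if (r == -EDEADLK)
 *		return r;		// GPU hang, caller handles reset
 *	if (r == 0)
 *		return -ETIMEDOUT;	// timed out, no hang detected
 *	// r > 0: seq signaled, r jiffies remained
 */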

/**
 * amdgpu_fence_wait - wait for a fence to signal
 *
 * @fence: amdgpu fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
{
	long r;

	r = fence_wait_timeout(&fence->base, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;
	return 0;
}

/**
 * amdgpu_fence_wait_next - wait for the next fence to signal
 *
 * @ring: ring the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
	long r;
	uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;

	if (seq >= ring->fence_drv.sync_seq[ring->idx])
		return -ENOENT;

	r = amdgpu_fence_ring_wait_seq_timeout(ring, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	return 0;
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	long r;
	uint64_t seq = ring->fence_drv.sync_seq[ring->idx];

	if (!seq)
		return 0;

	r = amdgpu_fence_ring_wait_seq_timeout(ring, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(ring->adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
			ring->idx, r);
	}
	return 0;
}

/**
 * amdgpu_fence_ref - take a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence)
{
	fence_get(&fence->base);
	return fence;
}

/**
 * amdgpu_fence_unref - remove a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Remove a reference on a fence (all asics).
 */
void amdgpu_fence_unref(struct amdgpu_fence **fence)
{
	struct amdgpu_fence *tmp = *fence;

	*fence = NULL;
	if (tmp)
		fence_put(&tmp->base);
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last
	 * sequence, but it's ok to report a slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = ring->fence_drv.sync_seq[ring->idx]
		- atomic64_read(&ring->fence_drv.last_seq);
	/* to avoid 32-bit wrap around */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned)emitted;
}

/**
 * amdgpu_fence_need_sync - do we need a semaphore
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *fdrv;

	if (!fence)
		return false;

	if (fence->ring == dst_ring)
		return false;

	/* we are protected by the ring mutex */
	fdrv = &dst_ring->fence_drv;
	if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
		return false;

	return true;
}

/**
 * amdgpu_fence_note_sync - record the sync point
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *dst, *src;
	unsigned i;

	if (!fence)
		return;

	if (fence->ring == dst_ring)
		return;

	/* we are protected by the ring mutex */
	src = &fence->ring->fence_drv;
	dst = &dst_ring->fence_drv;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (i == dst_ring->idx)
			continue;

		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
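
/*
 * Illustrative pairing of the two helpers above (editor's sketch of an
 * assumed submission path, run under the ring mutex): a semaphore is only
 * emitted when crossing rings and the destination has not already synced
 * past this fence.
 *
 *	if (amdgpu_fence_need_sync(fence, dst_ring)) {
 *		// ... emit a semaphore wait on dst_ring here ...
 *		amdgpu_fence_note_sync(fence, dst_ring);
 *	}
 */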

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	int i;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		ring->fence_drv.sync_seq[i] = 0;

	atomic64_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
			  amdgpu_fence_check_lockup);
	ring->fence_drv.ring = ring;

	if (amdgpu_enable_scheduler) {
		ring->scheduler = amd_sched_create((void *)ring->adev,
						   &amdgpu_sched_ops,
						   ring->idx, 5, 0,
						   amdgpu_sched_hw_submission);
		if (!ring->scheduler)
			DRM_ERROR("Failed to create scheduler on ring %d.\n",
				  ring->idx);
	}
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		wake_up_all(&ring->fence_drv.fence_queue);
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		if (ring->scheduler)
			amd_sched_destroy(ring->scheduler);
		ring->fence_drv.initialized = false;
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}
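
/*
 * Lifecycle summary (editor's note, no new behavior):
 * amdgpu_fence_driver_init_ring() zeroes the per-ring state, each IP block
 * then calls amdgpu_fence_driver_start_ring() for the rings it actually
 * has, and amdgpu_fence_driver_fini() drains the rings before teardown.
 * Suspend/resume merely drain the ring and toggle the fence interrupt.
 */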

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
	}
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   ring->fence_drv.sync_seq[i]);

		for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
			struct amdgpu_ring *other = adev->rings[j];
			if (i != j && other && other->fence_drv.initialized &&
			    ring->fence_drv.sync_seq[j])
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, ring->fence_drv.sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
#else
	return 0;
#endif
}

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
{
	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
{
	int idx;
	struct amdgpu_fence *fence;

	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
		fence = fences[idx];
		if (fence) {
			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
				return true;
		}
	}
	return false;
}

struct amdgpu_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
};

static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
{
	struct amdgpu_wait_cb *wait =
		container_of(cb, struct amdgpu_wait_cb, base);
	wake_up_process(wait->task);
}

static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
					     signed long t)
{
	struct amdgpu_fence *array[AMDGPU_MAX_RINGS];
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_device *adev = fence->ring->adev;

	memset(&array[0], 0, sizeof(array));
	array[0] = fence;

	return amdgpu_fence_wait_any(adev, array, intr, t);
}

/* wait until any fence in the array is signaled */
signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
				  struct amdgpu_fence **array, bool intr,
				  signed long t)
{
	long idx;
	struct amdgpu_wait_cb cb[AMDGPU_MAX_RINGS];
	struct amdgpu_fence *fence;

	BUG_ON(!array);

	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
		fence = array[idx];
		if (fence) {
			cb[idx].task = current;
			if (fence_add_callback(&fence->base,
					&cb[idx].base, amdgpu_fence_wait_cb))
				return t; /* return if fence is already signaled */
		}
	}

	while (t > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * amdgpu_test_signaled_any must be called after
		 * set_current_state to prevent a race with wake_up_process
		 */
		if (amdgpu_test_signaled_any(array))
			break;

		if (adev->needs_reset) {
			t = -EDEADLK;
			break;
		}

		t = schedule_timeout(t);

		if (t > 0 && intr && signal_pending(current))
			t = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
		fence = array[idx];
		if (fence)
			fence_remove_callback(&fence->base, &cb[idx].base);
	}

	return t;
}

const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.signaled = amdgpu_fence_is_signaled,
	.wait = amdgpu_fence_default_wait,
	.release = NULL,
};
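
/*
 * Illustrative use of amdgpu_fence_wait_any() (editor's sketch, assumed
 * caller): wait until the first of two fences on different rings signals.
 * fence_a/fence_b and their rings are placeholders.
 *
 *	struct amdgpu_fence *fences[AMDGPU_MAX_RINGS] = {};
 *	signed long t;
 *
 *	fences[fence_a->ring->idx] = fence_a;
 *	fences[fence_b->ring->idx] = fence_b;
 *	t = amdgpu_fence_wait_any(adev, fences, true, MAX_SCHEDULE_TIMEOUT);
 *	if (t < 0)
 *		return t;	// -ERESTARTSYS or -EDEADLK
 */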