/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
static int radeon_debugfs_sa_init(struct radeon_device *rdev);

/**
 * radeon_ib_get - request an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the IB is associated with
 * @ib: IB object returned
 * @vm: requested vm
 * @size: requested IB size
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, struct radeon_vm *vm,
		  unsigned size)
{
	int i, r;

	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
	if (r) {
		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
		return r;
	}

	r = radeon_semaphore_create(rdev, &ib->semaphore);
	if (r) {
		return r;
	}

	ib->ring = ring;
	ib->fence = NULL;
	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
	ib->vm = vm;
	if (vm) {
		/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
		 * space and soffset is the offset inside the pool bo
		 */
		ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
	} else {
		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
	}
	ib->is_const_ib = false;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		ib->sync_to[i] = NULL;

	return 0;
}
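
/*
 * A minimal sketch of the IB life cycle as a caller might drive it
 * (illustrative only; assumes a valid rdev, and fill_packets() is a
 * hypothetical helper that writes packets and returns the dword count):
 *
 *	struct radeon_ib ib;
 *	int r;
 *
 *	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
 *	if (r)
 *		return r;
 *	ib.length_dw = fill_packets(ib.ptr);
 *	r = radeon_ib_schedule(rdev, &ib, NULL);
 *	radeon_ib_free(rdev, &ib);
 */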

/**
 * radeon_ib_free - free an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to free
 *
 * Free an IB (all asics).
 */
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
	radeon_fence_unref(&ib->fence);
}

/**
 * radeon_ib_sync_to - sync to fence before executing the IB
 *
 * @ib: IB object to add fence to
 * @fence: fence to sync to
 *
 * Sync to the fence before executing the IB
 */
void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
{
	struct radeon_fence *other;

	if (!fence)
		return;

	other = ib->sync_to[fence->ring];
	ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
}
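
/*
 * Sketch of a typical use: making an IB wait for work emitted on another
 * ring before it executes (assumes 'fence' came from that earlier work):
 *
 *	radeon_ib_sync_to(&ib, fence);
 *	r = radeon_ib_schedule(rdev, &ib, NULL);
 *
 * radeon_ib_schedule() then emits the required semaphore waits before
 * the IB itself.
 */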

/**
 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 * @const_ib: Const IB to schedule (SI only)
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
		       struct radeon_ib *const_ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	bool need_sync = false;
	int i, r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: nothing in the IB; decide whether this should be reported */
		dev_err(rdev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
	if (r) {
		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_fence *fence = ib->sync_to[i];
		if (radeon_fence_need_sync(fence, ib->ring)) {
			need_sync = true;
			radeon_semaphore_sync_rings(rdev, ib->semaphore,
						    fence->ring, ib->ring);
			radeon_fence_note_sync(fence, ib->ring);
		}
	}
	/* immediately free semaphore when we don't need to sync */
	if (!need_sync) {
		radeon_semaphore_free(rdev, &ib->semaphore, NULL);
	}
	/* if we can't remember our last VM flush then flush now! */
	/* XXX figure out why we have to flush for every IB */
	if (ib->vm /*&& !ib->vm->last_flush*/) {
		radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
	}
	if (const_ib) {
		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
		radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
	}
	radeon_ring_ib_execute(rdev, ib->ring, ib);
	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
	if (r) {
		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}
	if (const_ib) {
		const_ib->fence = radeon_fence_ref(ib->fence);
	}
	/* we just flushed the VM, remember that */
	if (ib->vm && !ib->vm->last_flush) {
		ib->vm->last_flush = radeon_fence_ref(ib->fence);
	}
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}
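
/*
 * Sketch of the SI dual-IB submission described above (assumes both IBs
 * were obtained with radeon_ib_get() and const_ib.is_const_ib is set):
 *
 *	r = radeon_ib_schedule(rdev, &ib, &const_ib);
 *
 * which places the CONST_IB on the ring ahead of the DE IB and lets
 * both share the DE IB's fence.
 */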

/**
 * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->ib_pool_ready) {
		return 0;
	}
	r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
				      RADEON_IB_POOL_SIZE*64*1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
	if (r) {
		return r;
	}

	rdev->ib_pool_ready = true;
	if (radeon_debugfs_sa_init(rdev)) {
		dev_err(rdev->dev, "failed to register debugfs file for SA\n");
	}
	return 0;
}

/**
 * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	if (rdev->ib_pool_ready) {
		radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
		radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
		rdev->ib_pool_ready = false;
	}
}

/**
 * radeon_ib_ring_tests - test IBs on the rings
 *
 * @rdev: radeon_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (!ring->ready)
			continue;

		r = radeon_ib_test(rdev, i, ring);
		if (r) {
			ring->ready = false;

			if (i == RADEON_RING_TYPE_GFX_INDEX) {
				/* oh, oh, that's really bad */
				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
				rdev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);

/**
 * radeon_ring_write - write a value to the ring
 *
 * @ring: radeon_ring structure holding ring information
 * @v: dword (dw) value to write
 *
 * Write a value to the requested ring buffer (all asics).
 */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}
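
/*
 * Sketch of the usual emit pattern built on radeon_ring_write() (modeled
 * on the ring test code; the packet and register are illustrative):
 *
 *	r = radeon_ring_lock(rdev, ring, 2);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, PACKET0(RADEON_SCRATCH_REG0, 0));
 *	radeon_ring_write(ring, 0xDEADBEEF);
 *	radeon_ring_unlock_commit(rdev, ring);
 */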

/**
 * radeon_ring_supports_scratch_reg - check if the ring supports
 * writing to scratch registers
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if a specific ring supports writing to scratch registers (all asics).
 * Returns true if the ring supports writing to scratch regs, false if not.
 */
bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
				      struct radeon_ring *ring)
{
	switch (ring->idx) {
	case RADEON_RING_TYPE_GFX_INDEX:
	case CAYMAN_RING_TYPE_CP1_INDEX:
	case CAYMAN_RING_TYPE_CP2_INDEX:
		return true;
	default:
		return false;
	}
}

/**
 * radeon_ring_free_size - update the free size
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the free dw slots in the ring buffer (all asics).
 */
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}
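
/*
 * Worked example of the mask trick above, assuming a 4 KiB ring
 * (1024 dwords, ptr_mask == 1023): with rptr == 100 and wptr == 900,
 * the free space is (100 + 1024 - 900) & 1023 == 224 dwords.  When
 * rptr == wptr the masked result is 0, which the code corrects to a
 * fully free ring of 1024 dwords.
 */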

/**
 * radeon_ring_alloc - allocate space on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* make sure we aren't trying to allocate more space than there is on the ring */
	if (ndw > (ring->ring_size / 4))
		return -ENOMEM;
	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next_locked(rdev, ring->idx);
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

/**
 * radeon_ring_lock - lock the ring and allocate space on it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Lock the ring and allocate @ndw dwords in the ring buffer
 * (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&rdev->ring_lock);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&rdev->ring_lock);
		return r;
	}
	return 0;
}

/**
 * radeon_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* We pad to match fetch size */
	while (ring->wptr & ring->align_mask) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	(void)RREG32(ring->wptr_reg);
}

/**
 * radeon_ring_unlock_commit - tell the GPU to execute the new
 * commands on the ring buffer and unlock it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Call radeon_ring_commit() then unlock the ring (all asics).
 */
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_ring_undo - reset the wptr
 *
 * @ring: radeon_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void radeon_ring_undo(struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

/**
 * radeon_ring_unlock_undo - reset the wptr and unlock the ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Call radeon_ring_undo() then unlock the ring (all asics).
 */
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_undo(ring);
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_ring_force_activity - add some nop packets to the ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Add some nop packets to the ring to force activity (all asics).
 * Used for lockup detection to see if the rptr is advancing.
 */
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	radeon_ring_free_size(rdev, ring);
	if (ring->rptr == ring->wptr) {
		r = radeon_ring_alloc(rdev, ring, 1);
		if (!r) {
			radeon_ring_write(ring, ring->nop);
			radeon_ring_commit(rdev, ring);
		}
	}
}

/**
 * radeon_ring_lockup_update - update lockup variables
 *
 * @ring: radeon_ring structure holding ring information
 *
 * Update the last rptr value and timestamp (all asics).
 */
void radeon_ring_lockup_update(struct radeon_ring *ring)
{
	ring->last_rptr = ring->rptr;
	ring->last_activity = jiffies;
}

/**
 * radeon_ring_test_lockup() - check if the ring is locked up
 * @rdev: radeon device structure
 * @ring: radeon_ring structure holding ring information
 *
 * We don't need to initialize the lockup tracking information, as we
 * will either see the CP rptr at a different value or a jiffies
 * wraparound, either of which forces (re)initialization of the lockup
 * tracking information.
 *
 * A possible false positive is being called after a long while with
 * last_rptr still equal to the current CP rptr; unlikely, but it can
 * happen.  A lockup is only reported once the rptr has not moved for
 * more than radeon_lockup_timeout msec, so the caller must call
 * radeon_ring_test_lockup() several times within that window for a
 * lockup to be reported; the fencing code should be cautious about that.
 *
 * The caller should also write to the ring (see
 * radeon_ring_force_activity()) to give the CP something to do, so we
 * don't get a false positive when the CP is simply idle.
 **/
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned long cjiffies, elapsed;
	uint32_t rptr;

	cjiffies = jiffies;
	if (!time_after(cjiffies, ring->last_activity)) {
		/* likely a wrap around */
		radeon_ring_lockup_update(ring);
		return false;
	}
	rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	if (ring->rptr != ring->last_rptr) {
		/* CP is still working, no lockup */
		radeon_ring_lockup_update(ring);
		return false;
	}
	elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
	if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
		return true;
	}
	/* give a chance to the GPU ... */
	return false;
}
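
/*
 * Sketch of how the two lockup helpers above are typically combined by
 * the per-ASIC gpu_is_lockup() callbacks:
 *
 *	radeon_ring_force_activity(rdev, ring);
 *	if (radeon_ring_test_lockup(rdev, ring)) {
 *		... rptr stalled for more than radeon_lockup_timeout msec,
 *		... trigger a GPU reset
 *	}
 */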

/**
 * radeon_ring_backup - Back up the content of a ring
 *
 * @rdev: radeon_device pointer
 * @ring: the ring we want to back up
 * @data: placeholder for the returned commit data
 *
 * Saves all unprocessed commits from a ring, and returns the number
 * of dwords saved.
 */
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
			    uint32_t **data)
{
	unsigned size, ptr, i;

	/* just in case lock the ring */
	mutex_lock(&rdev->ring_lock);
	*data = NULL;

	if (ring->ring_obj == NULL) {
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}

	/* it doesn't make sense to save anything if all fences are signaled */
	if (!radeon_fence_count_emitted(rdev, ring->idx)) {
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}

	/* calculate the number of dw on the ring */
	if (ring->rptr_save_reg)
		ptr = RREG32(ring->rptr_save_reg);
	else if (rdev->wb.enabled)
		ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
	else {
		/* no way to read back the next rptr */
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}

	size = ring->wptr + (ring->ring_size / 4);
	size -= ptr;
	size &= ring->ptr_mask;
	if (size == 0) {
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}

	/* and then save the content of the ring */
	*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
	if (!*data) {
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}
	for (i = 0; i < size; ++i) {
		(*data)[i] = ring->ring[ptr++];
		ptr &= ring->ptr_mask;
	}

	mutex_unlock(&rdev->ring_lock);
	return size;
}

/**
 * radeon_ring_restore - append saved commands to the ring again
 *
 * @rdev: radeon_device pointer
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocates space on the ring and restores the previously saved commands.
 * Returns 0 on success, error on failure.
 */
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
			unsigned size, uint32_t *data)
{
	int i, r;

	if (!size || !data)
		return 0;

	/* restore the saved ring content */
	r = radeon_ring_lock(rdev, ring, size);
	if (r)
		return r;

	for (i = 0; i < size; ++i) {
		radeon_ring_write(ring, data[i]);
	}

	radeon_ring_unlock_commit(rdev, ring);
	kfree(data);
	return 0;
}
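
/*
 * Sketch of how backup/restore pair up around a GPU reset, in the style
 * of radeon_gpu_reset() (error handling elided):
 *
 *	unsigned size;
 *	uint32_t *data;
 *
 *	size = radeon_ring_backup(rdev, ring, &data);
 *	... reset and reinitialize the ASIC ...
 *	radeon_ring_restore(rdev, ring, size, data);
 *
 * Note that radeon_ring_restore() consumes 'data' (it calls kfree()).
 */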

/**
 * radeon_ring_init - init driver ring struct.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ring_size: size of the ring
 * @rptr_offs: offset of the rptr writeback location in the WB buffer
 * @rptr_reg: MMIO offset of the rptr register
 * @wptr_reg: MMIO offset of the wptr register
 * @ptr_reg_shift: bit offset of the rptr/wptr values
 * @ptr_reg_mask: bit mask of the rptr/wptr values
 * @nop: nop packet for this ring
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     NULL, &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	if (rdev->wb.enabled) {
		u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
		ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
		ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
	}
	if (radeon_debugfs_ring_init(rdev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings !\n");
	}
	radeon_ring_lockup_update(ring);
	return 0;
}
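
/*
 * Sketch of a typical radeon_ring_init() call from an ASIC backend
 * (modeled on the r600-family CP setup; ring size, register names and
 * the pointer mask are illustrative):
 *
 *	r = radeon_ring_init(rdev, ring, 1024 * 1024,
 *			     RADEON_WB_CP_RPTR_OFFSET,
 *			     CP_RB_RPTR, CP_RB_WPTR,
 *			     0, 0xfffff, RADEON_CP_PACKET2);
 */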

/**
 * radeon_ring_fini - tear down the driver ring struct.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&rdev->ring_lock);
	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&rdev->ring_lock);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;
	u32 tmp;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift;
	seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
	tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift;
	seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
	if (ring->rptr_save_reg) {
		seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
			   RREG32(ring->rptr_save_reg));
	}
	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
	seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
	seq_printf(m, "last semaphore wait addr   : 0x%016llx\n", ring->last_semaphore_wait_addr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	/* print 32 dwords before the current rptr, as often the last
	 * executed packet is the root of the issue
	 */
	i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
	for (j = 0; j <= (count + 32); j++) {
		seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_cp2_index},
	{"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
	{"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
	{"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
};
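
/*
 * Note: these entries appear under the DRM debugfs root (typically
 * /sys/kernel/debug/dri/<minor>/), and dumping radeon_ring_* is usually
 * the first step when diagnosing a stalled ring.
 */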

static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);

	return 0;
}

static struct drm_info_list radeon_debugfs_sa_list[] = {
	{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};

#endif

static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
		struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
		int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
		unsigned r;

		if (&rdev->ring[ridx] != ring)
			continue;

		r = radeon_debugfs_add_files(rdev, info, 1);
		if (r)
			return r;
	}
#endif
	return 0;
}

static int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
	return 0;
#endif
}