/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

#define RADEON_TEST_COPY_BLIT 1
#define RADEON_TEST_COPY_DMA  0


/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned n, size;
	int i, r, ring;

	switch (flag) {
	case RADEON_TEST_COPY_DMA:
		ring = radeon_copy_dma_ring_index(rdev);
		break;
	case RADEON_TEST_COPY_BLIT:
		ring = radeon_copy_blit_ring_index(rdev);
		break;
	default:
		DRM_ERROR("Unknown copy method\n");
		return;
	}

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = rdev->mc.gtt_size - rdev->gart_pin_size;
	n /= size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
			     0, NULL, NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;
		struct radeon_fence *fence = NULL;

		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}
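		/* Fill the GTT BO with a recognizable pattern: each
		 * pointer-sized slot stores its own address, so a misplaced
		 * word is easy to identify when the copy is verified below.
		 */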
		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);

		if (ring == R600_RING_TYPE_DMA_INDEX)
			fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
						size / RADEON_GPU_PAGE_SIZE,
						vram_obj->tbo.resv);
		else
			fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
						 size / RADEON_GPU_PAGE_SIZE,
						 vram_obj->tbo.resv);
		if (IS_ERR(fence)) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			r = PTR_ERR(fence);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		/* Verify the GTT->VRAM copy, then overwrite VRAM with its own
		 * addresses for the copy back.
		 */
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void *)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void *)gtt_start - gtt_map));
				radeon_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);

		if (ring == R600_RING_TYPE_DMA_INDEX)
			fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
						size / RADEON_GPU_PAGE_SIZE,
						vram_obj->tbo.resv);
		else
			fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
						 size / RADEON_GPU_PAGE_SIZE,
						 vram_obj->tbo.resv);
		if (IS_ERR(fence)) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			r = PTR_ERR(fence);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void *)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void *)vram_start - vram_map));
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - rdev->mc.gtt_start);
		continue;

out_lclean_unpin:
		radeon_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		radeon_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		radeon_bo_unref(&gtt_obj[i]);
out_lclean:
		for (--i; i >= 0; --i) {
			radeon_bo_unpin(gtt_obj[i]);
			radeon_bo_unreserve(gtt_obj[i]);
			radeon_bo_unref(&gtt_obj[i]);
		}
		if (fence && !IS_ERR(fence))
			radeon_fence_unref(&fence);
		break;
	}
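	/* Reached on success and when the test loop bails out early; the
	 * pinned VRAM BO still has to be released either way.
	 */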
	radeon_bo_unpin(vram_obj);
out_unres:
	radeon_bo_unreserve(vram_obj);
out_unref:
	radeon_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (r) {
		printk(KERN_WARNING "Error while testing BO move.\n");
	}
}

void radeon_test_moves(struct radeon_device *rdev)
{
	if (rdev->asic->copy.dma)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
	if (rdev->asic->copy.blit)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}

/* Emit a fence on @ring; UVD and VCE rings need dummy create/destroy
 * messages before a fence can be emitted.
 */
static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
					     struct radeon_ring *ring,
					     struct radeon_fence **fence)
{
	uint32_t handle = ring->idx ^ 0xdeafbeef;
	int r;

	if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
		r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
		   ring->idx == TN_RING_TYPE_VCE2_INDEX) {
		r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else {
		r = radeon_ring_lock(rdev, ring, 64);
		if (r) {
			DRM_ERROR("Failed to lock ring %d\n", ring->idx);
			return r;
		}
		r = radeon_fence_emit(rdev, fence, ring->idx);
		if (r) {
			DRM_ERROR("Failed to emit fence\n");
			radeon_ring_unlock_undo(rdev, ring);
			return r;
		}
		radeon_ring_unlock_commit(rdev, ring, false);
	}
	return 0;
}

/* Check that a semaphore wait on ring A really blocks until ring B signals. */
void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *ringA,
			   struct radeon_ring *ringB)
{
	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
	struct radeon_semaphore *semaphore = NULL;
	int r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (radeon_fence_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);
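	/* A single signal should release only the first semaphore wait
	 * queued on ring A, so fence1 can complete while fence2 stays
	 * blocked behind the second wait.
	 */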
	r = radeon_fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	mdelay(1000);

	if (radeon_fence_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);

	r = radeon_fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 2\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fence1)
		radeon_fence_unref(&fence1);

	if (fence2)
		radeon_fence_unref(&fence2);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

/* Check that one semaphore signal from ring C wakes exactly one of the two
 * rings waiting on it.
 */
static void radeon_test_ring_sync2(struct radeon_device *rdev,
				   struct radeon_ring *ringA,
				   struct radeon_ring *ringB,
				   struct radeon_ring *ringC)
{
	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
	struct radeon_semaphore *semaphore = NULL;
	bool sigA, sigB;
	int i, r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);
	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (radeon_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (radeon_fence_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	for (i = 0; i < 30; ++i) {
		mdelay(100);
		sigA = radeon_fence_signaled(fenceA);
		sigB = radeon_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor fence B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fence A and fence B have been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
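	/* Signal the semaphore a second time so the other waiting ring can
	 * run as well.
	 */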
	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	mdelay(1000);

	r = radeon_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = radeon_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fenceA)
		radeon_fence_unref(&fenceA);

	if (fenceB)
		radeon_fence_unref(&fenceB);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

/* Syncing the two VCE rings against each other is not supported. */
static bool radeon_test_sync_possible(struct radeon_ring *ringA,
				      struct radeon_ring *ringB)
{
	if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
	    ringB->idx == TN_RING_TYPE_VCE1_INDEX)
		return false;

	return true;
}

void radeon_test_syncing(struct radeon_device *rdev)
{
	int i, j, k;

	for (i = 1; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ringA = &rdev->ring[i];
		if (!ringA->ready)
			continue;

		for (j = 0; j < i; ++j) {
			struct radeon_ring *ringB = &rdev->ring[j];
			if (!ringB->ready)
				continue;

			if (!radeon_test_sync_possible(ringA, ringB))
				continue;

			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
			radeon_test_ring_sync(rdev, ringA, ringB);

			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
			radeon_test_ring_sync(rdev, ringB, ringA);

			for (k = 0; k < j; ++k) {
				struct radeon_ring *ringC = &rdev->ring[k];
				if (!ringC->ready)
					continue;

				if (!radeon_test_sync_possible(ringA, ringC))
					continue;

				if (!radeon_test_sync_possible(ringB, ringC))
					continue;

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
				radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
				radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
				radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
				radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
				radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
				radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
			}
		}
	}
}