/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include <drm/ttm/ttm_tt.h>

#include "amdgpu_cs.h"
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"
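/* Set up the CS parser for one submission: resolve the context handle,
 * reject submissions from a context that was marked guilty after a GPU
 * reset, and create the sync object used to collect dependencies.
 */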
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
				 struct amdgpu_device *adev,
				 struct drm_file *filp,
				 union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	if (cs->in.num_chunks == 0)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->adev = adev;
	p->filp = filp;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx)
		return -EINVAL;

	if (atomic_read(&p->ctx->guilty)) {
		amdgpu_ctx_put(p->ctx);
		return -ECANCELED;
	}

	amdgpu_sync_create(&p->sync);
	return 0;
}

static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
			     struct drm_amdgpu_cs_chunk_ib *chunk_ib)
{
	struct drm_sched_entity *entity;
	unsigned int i;
	int r;

	r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
				  chunk_ib->ip_instance,
				  chunk_ib->ring, &entity);
	if (r)
		return r;

	/*
	 * Abort if there is no run queue associated with this entity,
	 * possibly because of a disabled HW IP.
	 */
	if (entity->rq == NULL)
		return -EINVAL;

	/* Check if we can add this IB to some existing job */
	for (i = 0; i < p->gang_size; ++i)
		if (p->entities[i] == entity)
			return i;

	/* If not, increase the gang size if possible */
	if (i == AMDGPU_CS_GANG_SIZE)
		return -EINVAL;

	p->entities[i] = entity;
	p->gang_size = i + 1;
	return i;
}

static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
			   struct drm_amdgpu_cs_chunk_ib *chunk_ib,
			   unsigned int *num_ibs)
{
	int r;

	r = amdgpu_cs_job_idx(p, chunk_ib);
	if (r < 0)
		return r;

	if (num_ibs[r] >= amdgpu_ring_max_ibs(chunk_ib->ip_type))
		return -EINVAL;

	++(num_ibs[r]);
	p->gang_leader_idx = r;
	return 0;
}

static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
				   struct drm_amdgpu_cs_chunk_fence *data,
				   uint32_t *offset)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	unsigned long size;
	int r;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &bo->tbo;
	/* One for TTM and two for the CS job */
	p->uf_entry.tv.num_shared = 3;

	drm_gem_object_put(gobj);

	size = amdgpu_bo_size(bo);
	if (size != PAGE_SIZE || (data->offset + 8) > size) {
		r = -EINVAL;
		goto error_unref;
	}

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		r = -EINVAL;
		goto error_unref;
	}

	*offset = data->offset;

	return 0;

error_unref:
	amdgpu_bo_unref(&bo);
	return r;
}
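/* Handle an AMDGPU_CHUNK_ID_BO_HANDLES chunk: build the parser's BO list
 * from the handle array passed in the chunk, as an alternative to the
 * bo_list_handle field of the CS ioctl.
 */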
static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
				   struct drm_amdgpu_bo_list_in *data)
{
	struct drm_amdgpu_bo_list_entry *info;
	int r;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	kvfree(info);

	return r;
}

/* Copy the data from userspace and go over it the first time */
static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
			   union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	uint32_t uf_offset = 0;
	size_t size;
	int ret;
	int i;

	chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
				     GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t) * cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				   GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
						    GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		/* Assume the worst on the following checks */
		ret = -EINVAL;
		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
						      &uf_offset);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			if (size < sizeof(struct drm_amdgpu_bo_list_in))
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
			break;

		default:
			goto free_partial_kdata;
		}
	}

	if (!p->gang_size) {
		ret = -EINVAL;
		goto free_partial_kdata;
	}

	for (i = 0; i < p->gang_size; ++i) {
		ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
				       num_ibs[i], &p->jobs[i]);
		if (ret)
			goto free_all_kdata;
	}
	p->gang_leader = p->jobs[p->gang_leader_idx];

	if (p->ctx->generation != p->gang_leader->generation) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.tv.bo)
		p->gang_leader->uf_addr = uf_offset;
	kvfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kvfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kvfree(chunk_array);

	return ret;
}
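/* Second pass handling of an IB chunk: allocate the IB in the job
 * selected by amdgpu_cs_job_idx() and enforce the limit of one
 * preemptible CE and DE IB per GFX submission.
 */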
static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
			   struct amdgpu_cs_chunk *chunk,
			   unsigned int *ce_preempt,
			   unsigned int *de_preempt)
{
	struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	int r;

	r = amdgpu_cs_job_idx(p, chunk_ib);
	if (r < 0)
		return r;

	job = p->jobs[r];
	ring = amdgpu_job_ring(job);
	ib = &job->ibs[job->num_ibs++];

	/* MM engine doesn't support user fences */
	if (p->uf_entry.tv.bo && ring->funcs->no_user_fence)
		return -EINVAL;

	if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
	    chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
		if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
			(*ce_preempt)++;
		else
			(*de_preempt)++;

		/* Each GFX command submission allows at most one preemptible
		 * IB each for CE and DE.
		 */
		if (*ce_preempt > 1 || *de_preempt > 1)
			return -EINVAL;
	}

	if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;

	r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
			  chunk_ib->ib_bytes : 0,
			  AMDGPU_IB_POOL_DELAYED, ib);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}

	ib->gpu_addr = chunk_ib->va_start;
	ib->length_dw = chunk_ib->ib_bytes / 4;
	ib->flags = chunk_ib->flags;
	return 0;
}

static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
				     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned int num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
		struct drm_sched_entity *entity;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
		amdgpu_ctx_put(ctx);

		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
			struct drm_sched_fence *s_fence;
			struct dma_fence *old = fence;

			s_fence = to_drm_sched_fence(fence);
			fence = dma_fence_get(&s_fence->scheduled);
			dma_fence_put(old);
		}

		r = amdgpu_sync_fence(&p->sync, fence);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}

static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
					 uint32_t handle, u64 point,
					 u64 flags)
{
	struct dma_fence *fence;
	int r;

	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
	if (r) {
		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
			  handle, point, r);
		return r;
	}

	r = amdgpu_sync_fence(&p->sync, fence);
	dma_fence_put(fence);
	return r;
}

static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
				   struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
	unsigned int num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
					      struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
	unsigned int num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
						  syncobj_deps[i].point,
						  syncobj_deps[i].flags);
		if (r)
			return r;
	}

	return 0;
}
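/* Collect the syncobjs to signal when the submission completes; the
 * fences are attached in amdgpu_cs_post_dependencies().
 */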
static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
				    struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
	unsigned int num_deps;
	int i;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_deps[i].syncobj =
			drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_deps[i].syncobj)
			return -EINVAL;
		p->post_deps[i].chain = NULL;
		p->post_deps[i].point = 0;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
						struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
	unsigned int num_deps;
	int i;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

		dep->chain = NULL;
		if (syncobj_deps[i].point) {
			dep->chain = dma_fence_chain_alloc();
			if (!dep->chain)
				return -ENOMEM;
		}

		dep->syncobj = drm_syncobj_find(p->filp,
						syncobj_deps[i].handle);
		if (!dep->syncobj) {
			dma_fence_chain_free(dep->chain);
			return -EINVAL;
		}
		dep->point = syncobj_deps[i].point;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_p2_shadow(struct amdgpu_cs_parser *p,
			       struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow = chunk->kdata;
	int i;

	if (shadow->flags & ~AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW)
		return -EINVAL;

	for (i = 0; i < p->gang_size; ++i) {
		p->jobs[i]->shadow_va = shadow->shadow_va;
		p->jobs[i]->csa_va = shadow->csa_va;
		p->jobs[i]->gds_va = shadow->gds_va;
		p->jobs[i]->init_shadow =
			shadow->flags & AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;
	}

	return 0;
}

static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
{
	unsigned int ce_preempt = 0, de_preempt = 0;
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		switch (chunk->chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
			r = amdgpu_cs_p2_dependencies(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
			r = amdgpu_cs_p2_syncobj_in(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			r = amdgpu_cs_p2_syncobj_out(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
			r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
			r = amdgpu_cs_p2_shadow(p, chunk);
			if (r)
				return r;
			break;
		}
	}

	return 0;
}
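/* The helpers below implement the buffer move throttling used by
 * amdgpu_cs_parser_bos(): the budget is accounted in microseconds and
 * converted to and from bytes using the log2_max_MBps shift.
 */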
/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}
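/* Worked example for the conversion: if log2_max_MBps were 8 (256 MB/s),
 * the 200000 us upper bound used below would correspond to
 * 200000 << 8 = 51200000 bytes, i.e. roughly 51 MB of buffer moves.
 */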
/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;
	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;

			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}
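/* Validate a BO into a placement allowed by the remaining migration
 * budget. Used directly and as the callback for
 * amdgpu_vm_validate_pt_bos().
 */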
static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_cs_parser *p = param;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.base.resv
	};
	uint32_t domain;
	int r;

	if (bo->tbo.pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold &&
	    (!bo->tbo.base.dma_buf ||
	     list_empty(&bo->tbo.base.dma_buf->attachments))) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
		    lobj->user_invalidated && lobj->user_pages) {
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;

			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
		}

		r = amdgpu_cs_bo_validate(p, bo);
		if (r)
			return r;

		kvfree(lobj->user_pages);
		lobj->user_pages = NULL;
	}
	return 0;
}
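/* Gather, reserve and validate all BOs referenced by the submission:
 * the user supplied BO list, the VM page directory BOs and the optional
 * user fence BO.
 */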
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	unsigned int i;
	int r;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	mutex_lock(&p->bo_list->bo_list_mutex);

	/* One for TTM and one for the CS job */
	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 2;

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	/* Get userptr backing pages. If pages are updated after being
	 * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
	 * will do amdgpu_ttm_backend_bind() to flush and invalidate new pages.
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		bool userpage_invalidated = false;
		int i;

		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
					       sizeof(struct page *),
					       GFP_KERNEL | __GFP_ZERO);
		if (!e->user_pages) {
			DRM_ERROR("kvmalloc_array failure\n");
			r = -ENOMEM;
			goto out_free_user_pages;
		}

		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
		if (r) {
			kvfree(e->user_pages);
			e->user_pages = NULL;
			goto out_free_user_pages;
		}

		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
				userpage_invalidated = true;
				break;
			}
		}
		e->user_invalidated = userpage_invalidated;
	}

	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
				   &duplicates);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
		goto out_free_user_pages;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		e->bo_va = amdgpu_vm_bo_find(vm, bo);
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_bo_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r)
		goto error_validate;

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r)
		goto error_validate;

	if (p->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		if (r)
			goto error_validate;

		p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	for (i = 0; i < p->gang_size; ++i)
		amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
					 p->bo_list->gws_obj,
					 p->bo_list->oa_obj);
	return 0;

error_validate:
	ttm_eu_backoff_reservation(&p->ticket, &p->validated);

out_free_user_pages:
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		if (!e->user_pages)
			continue;
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
		kvfree(e->user_pages);
		e->user_pages = NULL;
		e->range = NULL;
	}
	mutex_unlock(&p->bo_list->bo_list_mutex);
	return r;
}

static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
{
	int i, j;

	if (!trace_amdgpu_cs_enabled())
		return;

	for (i = 0; i < p->gang_size; ++i) {
		struct amdgpu_job *job = p->jobs[i];

		for (j = 0; j < job->num_ibs; ++j)
			trace_amdgpu_cs(p, job, &job->ibs[j]);
	}
}
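/* For rings that need VM emulation (UVD/VCE), map the IB BO and either
 * copy the commands for parsing or patch them in place.
 */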
static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
			       struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = amdgpu_job_ring(job);
	unsigned int i;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
		return 0;

	for (i = 0; i < job->num_ibs; ++i) {
		struct amdgpu_ib *ib = &job->ibs[i];
		struct amdgpu_bo_va_mapping *m;
		struct amdgpu_bo *aobj;
		uint64_t va_start;
		uint8_t *kptr;

		va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
		r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
		if (r) {
			DRM_ERROR("IB va_start is invalid\n");
			return r;
		}

		if ((va_start + ib->length_dw * 4) >
		    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
			DRM_ERROR("IB va_start+ib_bytes is invalid\n");
			return -EINVAL;
		}

		/* the IB should be reserved at this point */
		r = amdgpu_bo_kmap(aobj, (void **)&kptr);
		if (r)
			return r;

		kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);

		if (ring->funcs->parse_cs) {
			memcpy(ib->ptr, kptr, ib->length_dw * 4);
			amdgpu_bo_kunmap(aobj);

			r = amdgpu_ring_parse_cs(ring, p, job, ib);
			if (r)
				return r;
		} else {
			ib->ptr = (uint32_t *)kptr;
			r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
			amdgpu_bo_kunmap(aobj);
			if (r)
				return r;
		}
	}

	return 0;
}

static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
{
	unsigned int i;
	int r;

	for (i = 0; i < p->gang_size; ++i) {
		r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
		if (r)
			return r;
	}
	return 0;
}

static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_job *job = p->gang_leader;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	unsigned int i;
	int r;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
	if (r)
		return r;

	if (fpriv->csa_va) {
		bo_va = fpriv->csa_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		/* ignore duplicates */
		bo = ttm_to_amdgpu_bo(e->tv.bo);
		if (!bo)
			continue;

		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(&p->sync, vm->last_update);
	if (r)
		return r;

	for (i = 0; i < p->gang_size; ++i) {
		job = p->jobs[i];

		if (!job->vm)
			continue;

		job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
	}

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			/* ignore duplicates */
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return 0;
}
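/* Wait for the previous submission on the context, sync to the
 * reservation objects of all validated BOs and push the collected
 * fences into the jobs. Fences from the gang leader's own scheduler
 * ring are kept as explicit dependencies so that a pipeline sync is
 * inserted.
 */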
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_gpu_scheduler *sched;
	struct amdgpu_bo_list_entry *e;
	struct dma_fence *fence;
	unsigned int i;
	int r;

	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
		return r;
	}

	list_for_each_entry(e, &p->validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		struct dma_resv *resv = bo->tbo.base.resv;
		enum amdgpu_sync_mode sync_mode;

		sync_mode = amdgpu_bo_explicit_sync(bo) ?
			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
		r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
				     &fpriv->vm);
		if (r)
			return r;
	}

	for (i = 0; i < p->gang_size; ++i) {
		r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
		if (r)
			return r;
	}

	sched = p->gang_leader->base.entity->rq->sched;
	while ((fence = amdgpu_sync_get_fence(&p->sync))) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);

		/*
		 * When we have a dependency it might be necessary to insert a
		 * pipeline sync to make sure that all caches etc are flushed and the
		 * next job actually sees the results from the previous one
		 * before we start executing on the same scheduler ring.
		 */
		if (!s_fence || s_fence->sched != sched) {
			dma_fence_put(fence);
			continue;
		}

		r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}

static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_deps; ++i) {
		if (p->post_deps[i].chain && p->post_deps[i].point) {
			drm_syncobj_add_point(p->post_deps[i].syncobj,
					      p->post_deps[i].chain,
					      p->fence, p->post_deps[i].point);
			p->post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
						  p->fence);
		}
	}
}
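/* Arm and push the prepared jobs to their schedulers, attach the
 * finished fences to the reserved BOs and return the sequence number
 * to userspace.
 */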
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_job *leader = p->gang_leader;
	struct amdgpu_bo_list_entry *e;
	unsigned int i;
	uint64_t seq;
	int r;

	for (i = 0; i < p->gang_size; ++i)
		drm_sched_job_arm(&p->jobs[i]->base);

	for (i = 0; i < p->gang_size; ++i) {
		struct dma_fence *fence;

		if (p->jobs[i] == leader)
			continue;

		fence = &p->jobs[i]->base.s_fence->scheduled;
		dma_fence_get(fence);
		r = drm_sched_job_add_dependency(&leader->base, fence);
		if (r) {
			dma_fence_put(fence);
			return r;
		}
	}

	if (p->gang_size > 1) {
		for (i = 0; i < p->gang_size; ++i)
			amdgpu_job_set_gang_leader(p->jobs[i], leader);
	}

	/* No memory allocation is allowed while holding the notifier lock.
	 * The lock is held until amdgpu_cs_submit is finished and fence is
	 * added to BOs.
	 */
	mutex_lock(&p->adev->notifier_lock);

	/* If the userptr pages were invalidated after amdgpu_cs_parser_bos(),
	 * return -EAGAIN; drmIoctl() in libdrm will restart the
	 * amdgpu_cs_ioctl.
	 */
	r = 0;
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
		e->range = NULL;
	}
	if (r) {
		r = -EAGAIN;
		mutex_unlock(&p->adev->notifier_lock);
		return r;
	}

	p->fence = dma_fence_get(&leader->base.s_fence->finished);
	list_for_each_entry(e, &p->validated, tv.head) {

		/* Everybody except for the gang leader uses READ */
		for (i = 0; i < p->gang_size; ++i) {
			if (p->jobs[i] == leader)
				continue;

			dma_resv_add_fence(e->tv.bo->base.resv,
					   &p->jobs[i]->base.s_fence->finished,
					   DMA_RESV_USAGE_READ);
		}

		/* The gang leader is remembered as writer */
		e->tv.num_shared = 0;
	}

	seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
				   p->fence);
	amdgpu_cs_post_dependencies(p);

	if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	leader->uf_sequence = seq;

	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
	for (i = 0; i < p->gang_size; ++i) {
		amdgpu_job_free_resources(p->jobs[i]);
		trace_amdgpu_cs_ioctl(p->jobs[i]);
		drm_sched_entity_push_job(&p->jobs[i]->base);
		p->jobs[i] = NULL;
	}

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);

	mutex_unlock(&p->adev->notifier_lock);
	mutex_unlock(&p->bo_list->bo_list_mutex);
	return 0;
}

/* Cleanup the parser structure */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
	unsigned int i;

	amdgpu_sync_free(&parser->sync);
	for (i = 0; i < parser->num_post_deps; i++) {
		drm_syncobj_put(parser->post_deps[i].syncobj);
		kfree(parser->post_deps[i].chain);
	}
	kfree(parser->post_deps);

	dma_fence_put(parser->fence);

	if (parser->ctx)
		amdgpu_ctx_put(parser->ctx);
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kvfree(parser->chunks);
	for (i = 0; i < parser->gang_size; ++i) {
		if (parser->jobs[i])
			amdgpu_job_free(parser->jobs[i]);
	}
	if (parser->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

		amdgpu_bo_unref(&uf);
	}
}
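/**
 * amdgpu_cs_ioctl - process a command submission from userspace
 *
 * @dev: drm device
 * @data: union drm_amdgpu_cs from userspace
 * @filp: file private
 *
 * Runs both parser passes, reserves and validates all buffers, handles
 * the VM updates and synchronization and finally pushes the jobs to the
 * schedulers.
 */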
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_cs_parser parser;
	int r;

	if (amdgpu_ras_intr_triggered())
		return -EHWPOISON;

	if (!adev->accel_working)
		return -EBUSY;

	r = amdgpu_cs_parser_init(&parser, adev, filp, data);
	if (r) {
		if (printk_ratelimit())
			DRM_ERROR("Failed to initialize parser %d!\n", r);
		return r;
	}

	r = amdgpu_cs_pass1(&parser, data);
	if (r)
		goto error_fini;

	r = amdgpu_cs_pass2(&parser);
	if (r)
		goto error_fini;

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS && r != -EAGAIN)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto error_fini;
	}

	r = amdgpu_cs_patch_jobs(&parser);
	if (r)
		goto error_backoff;

	r = amdgpu_cs_vm_handling(&parser);
	if (r)
		goto error_backoff;

	r = amdgpu_cs_sync_rings(&parser);
	if (r)
		goto error_backoff;

	trace_amdgpu_cs_ibs(&parser);

	r = amdgpu_cs_submit(&parser, data);
	if (r)
		goto error_backoff;

	amdgpu_cs_parser_fini(&parser);
	return 0;

error_backoff:
	ttm_eu_backoff_reservation(&parser.ticket, &parser.validated);
	mutex_unlock(&parser.bo_list->bo_list_mutex);

error_fini:
	amdgpu_cs_parser_fini(&parser);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
				  wait->in.ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
				  user->ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}
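/**
 * amdgpu_cs_fence_to_handle_ioctl - convert a CS fence to a handle
 *
 * @dev: drm device
 * @data: union drm_amdgpu_fence_to_handle from userspace
 * @filp: file private
 *
 * Converts the fence of a command submission into a drm syncobj handle,
 * a syncobj file descriptor or a sync_file file descriptor.
 */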
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	if (!fence)
		fence = dma_fence_get_stub();

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		dma_fence_put(fence);
		return -EINVAL;
	}
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		/* Check the error before dropping our reference */
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}
/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: Placeholder to return found BO mapping
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns 0 when the mapping was found and the BO is
 * reserved by this CS, a negative error code otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}