1 /* 2 * Copyright 2008 Jerome Glisse. 3 * All Rights Reserved. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice (including the next 13 * paragraph) shall be included in all copies or substantial portions of the 14 * Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: 25 * Jerome Glisse <glisse@freedesktop.org> 26 */ 27 28 #include <linux/file.h> 29 #include <linux/pagemap.h> 30 #include <linux/sync_file.h> 31 #include <linux/dma-buf.h> 32 33 #include <drm/amdgpu_drm.h> 34 #include <drm/drm_syncobj.h> 35 #include "amdgpu_cs.h" 36 #include "amdgpu.h" 37 #include "amdgpu_trace.h" 38 #include "amdgpu_gmc.h" 39 #include "amdgpu_gem.h" 40 #include "amdgpu_ras.h" 41 42 static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, 43 struct amdgpu_device *adev, 44 struct drm_file *filp, 45 union drm_amdgpu_cs *cs) 46 { 47 struct amdgpu_fpriv *fpriv = filp->driver_priv; 48 49 if (cs->in.num_chunks == 0) 50 return -EINVAL; 51 52 memset(p, 0, sizeof(*p)); 53 p->adev = adev; 54 p->filp = filp; 55 56 p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id); 57 if (!p->ctx) 58 return -EINVAL; 59 60 if (atomic_read(&p->ctx->guilty)) { 61 amdgpu_ctx_put(p->ctx); 62 return -ECANCELED; 63 } 64 return 0; 65 } 66 67 static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p, 68 struct drm_amdgpu_cs_chunk_ib *chunk_ib) 69 { 70 struct drm_sched_entity *entity; 71 unsigned int i; 72 int r; 73 74 r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type, 75 chunk_ib->ip_instance, 76 chunk_ib->ring, &entity); 77 if (r) 78 return r; 79 80 /* 81 * Abort if there is no run queue associated with this entity. 82 * Possibly because of disabled HW IP. 83 */ 84 if (entity->rq == NULL) 85 return -EINVAL; 86 87 /* Check if we can add this IB to some existing job */ 88 for (i = 0; i < p->gang_size; ++i) 89 if (p->entities[i] == entity) 90 return i; 91 92 /* If not increase the gang size if possible */ 93 if (i == AMDGPU_CS_GANG_SIZE) 94 return -EINVAL; 95 96 p->entities[i] = entity; 97 p->gang_size = i + 1; 98 return i; 99 } 100 101 static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p, 102 struct drm_amdgpu_cs_chunk_ib *chunk_ib, 103 unsigned int *num_ibs) 104 { 105 int r; 106 107 r = amdgpu_cs_job_idx(p, chunk_ib); 108 if (r < 0) 109 return r; 110 111 ++(num_ibs[r]); 112 p->gang_leader_idx = r; 113 return 0; 114 } 115 116 static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p, 117 struct drm_amdgpu_cs_chunk_fence *data, 118 uint32_t *offset) 119 { 120 struct drm_gem_object *gobj; 121 struct amdgpu_bo *bo; 122 unsigned long size; 123 int r; 124 125 gobj = drm_gem_object_lookup(p->filp, data->handle); 126 if (gobj == NULL) 127 return -EINVAL; 128 129 bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); 130 p->uf_entry.priority = 0; 131 p->uf_entry.tv.bo = &bo->tbo; 132 /* One for TTM and two for the CS job */ 133 p->uf_entry.tv.num_shared = 3; 134 135 drm_gem_object_put(gobj); 136 137 size = amdgpu_bo_size(bo); 138 if (size != PAGE_SIZE || (data->offset + 8) > size) { 139 r = -EINVAL; 140 goto error_unref; 141 } 142 143 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { 144 r = -EINVAL; 145 goto error_unref; 146 } 147 148 *offset = data->offset; 149 150 return 0; 151 152 error_unref: 153 amdgpu_bo_unref(&bo); 154 return r; 155 } 156 157 static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p, 158 struct drm_amdgpu_bo_list_in *data) 159 { 160 struct drm_amdgpu_bo_list_entry *info; 161 int r; 162 163 r = amdgpu_bo_create_list_entry_array(data, &info); 164 if (r) 165 return r; 166 167 r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number, 168 &p->bo_list); 169 if (r) 170 goto error_free; 171 172 kvfree(info); 173 return 0; 174 175 error_free: 176 kvfree(info); 177 178 return r; 179 } 180 181 /* Copy the data from userspace and go over it the first time */ 182 static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, 183 union drm_amdgpu_cs *cs) 184 { 185 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 186 unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { }; 187 struct amdgpu_vm *vm = &fpriv->vm; 188 uint64_t *chunk_array_user; 189 uint64_t *chunk_array; 190 uint32_t uf_offset = 0; 191 unsigned int size; 192 int ret; 193 int i; 194 195 chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), 196 GFP_KERNEL); 197 if (!chunk_array) 198 return -ENOMEM; 199 200 /* get chunks */ 201 chunk_array_user = u64_to_user_ptr(cs->in.chunks); 202 if (copy_from_user(chunk_array, chunk_array_user, 203 sizeof(uint64_t)*cs->in.num_chunks)) { 204 ret = -EFAULT; 205 goto free_chunk; 206 } 207 208 p->nchunks = cs->in.num_chunks; 209 p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), 210 GFP_KERNEL); 211 if (!p->chunks) { 212 ret = -ENOMEM; 213 goto free_chunk; 214 } 215 216 for (i = 0; i < p->nchunks; i++) { 217 struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL; 218 struct drm_amdgpu_cs_chunk user_chunk; 219 uint32_t __user *cdata; 220 221 chunk_ptr = u64_to_user_ptr(chunk_array[i]); 222 if (copy_from_user(&user_chunk, chunk_ptr, 223 sizeof(struct drm_amdgpu_cs_chunk))) { 224 ret = -EFAULT; 225 i--; 226 goto free_partial_kdata; 227 } 228 p->chunks[i].chunk_id = user_chunk.chunk_id; 229 p->chunks[i].length_dw = user_chunk.length_dw; 230 231 size = p->chunks[i].length_dw; 232 cdata = u64_to_user_ptr(user_chunk.chunk_data); 233 234 p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), 235 GFP_KERNEL); 236 if (p->chunks[i].kdata == NULL) { 237 ret = -ENOMEM; 238 i--; 239 goto free_partial_kdata; 240 } 241 size *= sizeof(uint32_t); 242 if (copy_from_user(p->chunks[i].kdata, cdata, size)) { 243 ret = -EFAULT; 244 goto free_partial_kdata; 245 } 246 247 /* Assume the worst on the following checks */ 248 ret = -EINVAL; 249 switch (p->chunks[i].chunk_id) { 250 case AMDGPU_CHUNK_ID_IB: 251 if (size < sizeof(struct drm_amdgpu_cs_chunk_ib)) 252 goto free_partial_kdata; 253 254 ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs); 255 if (ret) 256 goto free_partial_kdata; 257 break; 258 259 case AMDGPU_CHUNK_ID_FENCE: 260 if (size < sizeof(struct drm_amdgpu_cs_chunk_fence)) 261 goto free_partial_kdata; 262 263 ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata, 264 &uf_offset); 265 if (ret) 266 goto free_partial_kdata; 267 break; 268 269 case AMDGPU_CHUNK_ID_BO_HANDLES: 270 if (size < sizeof(struct drm_amdgpu_bo_list_in)) 271 goto free_partial_kdata; 272 273 ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata); 274 if (ret) 275 goto free_partial_kdata; 276 break; 277 278 case AMDGPU_CHUNK_ID_DEPENDENCIES: 279 case AMDGPU_CHUNK_ID_SYNCOBJ_IN: 280 case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: 281 case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: 282 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT: 283 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL: 284 break; 285 286 default: 287 goto free_partial_kdata; 288 } 289 } 290 291 if (!p->gang_size) { 292 ret = -EINVAL; 293 goto free_partial_kdata; 294 } 295 296 for (i = 0; i < p->gang_size; ++i) { 297 ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm); 298 if (ret) 299 goto free_all_kdata; 300 301 ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i], 302 &fpriv->vm); 303 if (ret) 304 goto free_all_kdata; 305 } 306 p->gang_leader = p->jobs[p->gang_leader_idx]; 307 308 if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) { 309 ret = -ECANCELED; 310 goto free_all_kdata; 311 } 312 313 if (p->uf_entry.tv.bo) 314 p->gang_leader->uf_addr = uf_offset; 315 kvfree(chunk_array); 316 317 /* Use this opportunity to fill in task info for the vm */ 318 amdgpu_vm_set_task_info(vm); 319 320 return 0; 321 322 free_all_kdata: 323 i = p->nchunks - 1; 324 free_partial_kdata: 325 for (; i >= 0; i--) 326 kvfree(p->chunks[i].kdata); 327 kvfree(p->chunks); 328 p->chunks = NULL; 329 p->nchunks = 0; 330 free_chunk: 331 kvfree(chunk_array); 332 333 return ret; 334 } 335 336 static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p, 337 struct amdgpu_cs_chunk *chunk, 338 unsigned int *ce_preempt, 339 unsigned int *de_preempt) 340 { 341 struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata; 342 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 343 struct amdgpu_vm *vm = &fpriv->vm; 344 struct amdgpu_ring *ring; 345 struct amdgpu_job *job; 346 struct amdgpu_ib *ib; 347 int r; 348 349 r = amdgpu_cs_job_idx(p, chunk_ib); 350 if (r < 0) 351 return r; 352 353 job = p->jobs[r]; 354 ring = amdgpu_job_ring(job); 355 ib = &job->ibs[job->num_ibs++]; 356 357 /* MM engine doesn't support user fences */ 358 if (p->uf_entry.tv.bo && ring->funcs->no_user_fence) 359 return -EINVAL; 360 361 if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && 362 chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) { 363 if (chunk_ib->flags & AMDGPU_IB_FLAG_CE) 364 (*ce_preempt)++; 365 else 366 (*de_preempt)++; 367 368 /* Each GFX command submit allows only 1 IB max 369 * preemptible for CE & DE */ 370 if (*ce_preempt > 1 || *de_preempt > 1) 371 return -EINVAL; 372 } 373 374 if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) 375 job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT; 376 377 r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ? 378 chunk_ib->ib_bytes : 0, 379 AMDGPU_IB_POOL_DELAYED, ib); 380 if (r) { 381 DRM_ERROR("Failed to get ib !\n"); 382 return r; 383 } 384 385 ib->gpu_addr = chunk_ib->va_start; 386 ib->length_dw = chunk_ib->ib_bytes / 4; 387 ib->flags = chunk_ib->flags; 388 return 0; 389 } 390 391 static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p, 392 struct amdgpu_cs_chunk *chunk) 393 { 394 struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata; 395 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 396 unsigned num_deps; 397 int i, r; 398 399 num_deps = chunk->length_dw * 4 / 400 sizeof(struct drm_amdgpu_cs_chunk_dep); 401 402 for (i = 0; i < num_deps; ++i) { 403 struct amdgpu_ctx *ctx; 404 struct drm_sched_entity *entity; 405 struct dma_fence *fence; 406 407 ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id); 408 if (ctx == NULL) 409 return -EINVAL; 410 411 r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type, 412 deps[i].ip_instance, 413 deps[i].ring, &entity); 414 if (r) { 415 amdgpu_ctx_put(ctx); 416 return r; 417 } 418 419 fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle); 420 amdgpu_ctx_put(ctx); 421 422 if (IS_ERR(fence)) 423 return PTR_ERR(fence); 424 else if (!fence) 425 continue; 426 427 if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { 428 struct drm_sched_fence *s_fence; 429 struct dma_fence *old = fence; 430 431 s_fence = to_drm_sched_fence(fence); 432 fence = dma_fence_get(&s_fence->scheduled); 433 dma_fence_put(old); 434 } 435 436 r = amdgpu_sync_fence(&p->gang_leader->sync, fence); 437 dma_fence_put(fence); 438 if (r) 439 return r; 440 } 441 return 0; 442 } 443 444 static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p, 445 uint32_t handle, u64 point, 446 u64 flags) 447 { 448 struct dma_fence *fence; 449 int r; 450 451 r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence); 452 if (r) { 453 DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n", 454 handle, point, r); 455 return r; 456 } 457 458 r = amdgpu_sync_fence(&p->gang_leader->sync, fence); 459 dma_fence_put(fence); 460 461 return r; 462 } 463 464 static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p, 465 struct amdgpu_cs_chunk *chunk) 466 { 467 struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata; 468 unsigned num_deps; 469 int i, r; 470 471 num_deps = chunk->length_dw * 4 / 472 sizeof(struct drm_amdgpu_cs_chunk_sem); 473 for (i = 0; i < num_deps; ++i) { 474 r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0); 475 if (r) 476 return r; 477 } 478 479 return 0; 480 } 481 482 static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p, 483 struct amdgpu_cs_chunk *chunk) 484 { 485 struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata; 486 unsigned num_deps; 487 int i, r; 488 489 num_deps = chunk->length_dw * 4 / 490 sizeof(struct drm_amdgpu_cs_chunk_syncobj); 491 for (i = 0; i < num_deps; ++i) { 492 r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle, 493 syncobj_deps[i].point, 494 syncobj_deps[i].flags); 495 if (r) 496 return r; 497 } 498 499 return 0; 500 } 501 502 static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p, 503 struct amdgpu_cs_chunk *chunk) 504 { 505 struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata; 506 unsigned num_deps; 507 int i; 508 509 num_deps = chunk->length_dw * 4 / 510 sizeof(struct drm_amdgpu_cs_chunk_sem); 511 512 if (p->post_deps) 513 return -EINVAL; 514 515 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), 516 GFP_KERNEL); 517 p->num_post_deps = 0; 518 519 if (!p->post_deps) 520 return -ENOMEM; 521 522 523 for (i = 0; i < num_deps; ++i) { 524 p->post_deps[i].syncobj = 525 drm_syncobj_find(p->filp, deps[i].handle); 526 if (!p->post_deps[i].syncobj) 527 return -EINVAL; 528 p->post_deps[i].chain = NULL; 529 p->post_deps[i].point = 0; 530 p->num_post_deps++; 531 } 532 533 return 0; 534 } 535 536 static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p, 537 struct amdgpu_cs_chunk *chunk) 538 { 539 struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata; 540 unsigned num_deps; 541 int i; 542 543 num_deps = chunk->length_dw * 4 / 544 sizeof(struct drm_amdgpu_cs_chunk_syncobj); 545 546 if (p->post_deps) 547 return -EINVAL; 548 549 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), 550 GFP_KERNEL); 551 p->num_post_deps = 0; 552 553 if (!p->post_deps) 554 return -ENOMEM; 555 556 for (i = 0; i < num_deps; ++i) { 557 struct amdgpu_cs_post_dep *dep = &p->post_deps[i]; 558 559 dep->chain = NULL; 560 if (syncobj_deps[i].point) { 561 dep->chain = dma_fence_chain_alloc(); 562 if (!dep->chain) 563 return -ENOMEM; 564 } 565 566 dep->syncobj = drm_syncobj_find(p->filp, 567 syncobj_deps[i].handle); 568 if (!dep->syncobj) { 569 dma_fence_chain_free(dep->chain); 570 return -EINVAL; 571 } 572 dep->point = syncobj_deps[i].point; 573 p->num_post_deps++; 574 } 575 576 return 0; 577 } 578 579 static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p) 580 { 581 unsigned int ce_preempt = 0, de_preempt = 0; 582 int i, r; 583 584 for (i = 0; i < p->nchunks; ++i) { 585 struct amdgpu_cs_chunk *chunk; 586 587 chunk = &p->chunks[i]; 588 589 switch (chunk->chunk_id) { 590 case AMDGPU_CHUNK_ID_IB: 591 r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt); 592 if (r) 593 return r; 594 break; 595 case AMDGPU_CHUNK_ID_DEPENDENCIES: 596 case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: 597 r = amdgpu_cs_p2_dependencies(p, chunk); 598 if (r) 599 return r; 600 break; 601 case AMDGPU_CHUNK_ID_SYNCOBJ_IN: 602 r = amdgpu_cs_p2_syncobj_in(p, chunk); 603 if (r) 604 return r; 605 break; 606 case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: 607 r = amdgpu_cs_p2_syncobj_out(p, chunk); 608 if (r) 609 return r; 610 break; 611 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT: 612 r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk); 613 if (r) 614 return r; 615 break; 616 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL: 617 r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk); 618 if (r) 619 return r; 620 break; 621 } 622 } 623 624 return 0; 625 } 626 627 /* Convert microseconds to bytes. */ 628 static u64 us_to_bytes(struct amdgpu_device *adev, s64 us) 629 { 630 if (us <= 0 || !adev->mm_stats.log2_max_MBps) 631 return 0; 632 633 /* Since accum_us is incremented by a million per second, just 634 * multiply it by the number of MB/s to get the number of bytes. 635 */ 636 return us << adev->mm_stats.log2_max_MBps; 637 } 638 639 static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes) 640 { 641 if (!adev->mm_stats.log2_max_MBps) 642 return 0; 643 644 return bytes >> adev->mm_stats.log2_max_MBps; 645 } 646 647 /* Returns how many bytes TTM can move right now. If no bytes can be moved, 648 * it returns 0. If it returns non-zero, it's OK to move at least one buffer, 649 * which means it can go over the threshold once. If that happens, the driver 650 * will be in debt and no other buffer migrations can be done until that debt 651 * is repaid. 652 * 653 * This approach allows moving a buffer of any size (it's important to allow 654 * that). 655 * 656 * The currency is simply time in microseconds and it increases as the clock 657 * ticks. The accumulated microseconds (us) are converted to bytes and 658 * returned. 659 */ 660 static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, 661 u64 *max_bytes, 662 u64 *max_vis_bytes) 663 { 664 s64 time_us, increment_us; 665 u64 free_vram, total_vram, used_vram; 666 /* Allow a maximum of 200 accumulated ms. This is basically per-IB 667 * throttling. 668 * 669 * It means that in order to get full max MBps, at least 5 IBs per 670 * second must be submitted and not more than 200ms apart from each 671 * other. 672 */ 673 const s64 us_upper_bound = 200000; 674 675 if (!adev->mm_stats.log2_max_MBps) { 676 *max_bytes = 0; 677 *max_vis_bytes = 0; 678 return; 679 } 680 681 total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size); 682 used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager); 683 free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram; 684 685 spin_lock(&adev->mm_stats.lock); 686 687 /* Increase the amount of accumulated us. */ 688 time_us = ktime_to_us(ktime_get()); 689 increment_us = time_us - adev->mm_stats.last_update_us; 690 adev->mm_stats.last_update_us = time_us; 691 adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us, 692 us_upper_bound); 693 694 /* This prevents the short period of low performance when the VRAM 695 * usage is low and the driver is in debt or doesn't have enough 696 * accumulated us to fill VRAM quickly. 697 * 698 * The situation can occur in these cases: 699 * - a lot of VRAM is freed by userspace 700 * - the presence of a big buffer causes a lot of evictions 701 * (solution: split buffers into smaller ones) 702 * 703 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting 704 * accum_us to a positive number. 705 */ 706 if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) { 707 s64 min_us; 708 709 /* Be more aggressive on dGPUs. Try to fill a portion of free 710 * VRAM now. 711 */ 712 if (!(adev->flags & AMD_IS_APU)) 713 min_us = bytes_to_us(adev, free_vram / 4); 714 else 715 min_us = 0; /* Reset accum_us on APUs. */ 716 717 adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us); 718 } 719 720 /* This is set to 0 if the driver is in debt to disallow (optional) 721 * buffer moves. 722 */ 723 *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us); 724 725 /* Do the same for visible VRAM if half of it is free */ 726 if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) { 727 u64 total_vis_vram = adev->gmc.visible_vram_size; 728 u64 used_vis_vram = 729 amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr); 730 731 if (used_vis_vram < total_vis_vram) { 732 u64 free_vis_vram = total_vis_vram - used_vis_vram; 733 adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis + 734 increment_us, us_upper_bound); 735 736 if (free_vis_vram >= total_vis_vram / 2) 737 adev->mm_stats.accum_us_vis = 738 max(bytes_to_us(adev, free_vis_vram / 2), 739 adev->mm_stats.accum_us_vis); 740 } 741 742 *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis); 743 } else { 744 *max_vis_bytes = 0; 745 } 746 747 spin_unlock(&adev->mm_stats.lock); 748 } 749 750 /* Report how many bytes have really been moved for the last command 751 * submission. This can result in a debt that can stop buffer migrations 752 * temporarily. 753 */ 754 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, 755 u64 num_vis_bytes) 756 { 757 spin_lock(&adev->mm_stats.lock); 758 adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes); 759 adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes); 760 spin_unlock(&adev->mm_stats.lock); 761 } 762 763 static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo) 764 { 765 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 766 struct amdgpu_cs_parser *p = param; 767 struct ttm_operation_ctx ctx = { 768 .interruptible = true, 769 .no_wait_gpu = false, 770 .resv = bo->tbo.base.resv 771 }; 772 uint32_t domain; 773 int r; 774 775 if (bo->tbo.pin_count) 776 return 0; 777 778 /* Don't move this buffer if we have depleted our allowance 779 * to move it. Don't move anything if the threshold is zero. 780 */ 781 if (p->bytes_moved < p->bytes_moved_threshold && 782 (!bo->tbo.base.dma_buf || 783 list_empty(&bo->tbo.base.dma_buf->attachments))) { 784 if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && 785 (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { 786 /* And don't move a CPU_ACCESS_REQUIRED BO to limited 787 * visible VRAM if we've depleted our allowance to do 788 * that. 789 */ 790 if (p->bytes_moved_vis < p->bytes_moved_vis_threshold) 791 domain = bo->preferred_domains; 792 else 793 domain = bo->allowed_domains; 794 } else { 795 domain = bo->preferred_domains; 796 } 797 } else { 798 domain = bo->allowed_domains; 799 } 800 801 retry: 802 amdgpu_bo_placement_from_domain(bo, domain); 803 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 804 805 p->bytes_moved += ctx.bytes_moved; 806 if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && 807 amdgpu_bo_in_cpu_visible_vram(bo)) 808 p->bytes_moved_vis += ctx.bytes_moved; 809 810 if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { 811 domain = bo->allowed_domains; 812 goto retry; 813 } 814 815 return r; 816 } 817 818 static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, 819 struct list_head *validated) 820 { 821 struct ttm_operation_ctx ctx = { true, false }; 822 struct amdgpu_bo_list_entry *lobj; 823 int r; 824 825 list_for_each_entry(lobj, validated, tv.head) { 826 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo); 827 struct mm_struct *usermm; 828 829 usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); 830 if (usermm && usermm != current->mm) 831 return -EPERM; 832 833 if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) && 834 lobj->user_invalidated && lobj->user_pages) { 835 amdgpu_bo_placement_from_domain(bo, 836 AMDGPU_GEM_DOMAIN_CPU); 837 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 838 if (r) 839 return r; 840 841 amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, 842 lobj->user_pages); 843 } 844 845 r = amdgpu_cs_bo_validate(p, bo); 846 if (r) 847 return r; 848 849 kvfree(lobj->user_pages); 850 lobj->user_pages = NULL; 851 } 852 return 0; 853 } 854 855 static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, 856 union drm_amdgpu_cs *cs) 857 { 858 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 859 struct amdgpu_vm *vm = &fpriv->vm; 860 struct amdgpu_bo_list_entry *e; 861 struct list_head duplicates; 862 unsigned int i; 863 int r; 864 865 INIT_LIST_HEAD(&p->validated); 866 867 /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */ 868 if (cs->in.bo_list_handle) { 869 if (p->bo_list) 870 return -EINVAL; 871 872 r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle, 873 &p->bo_list); 874 if (r) 875 return r; 876 } else if (!p->bo_list) { 877 /* Create a empty bo_list when no handle is provided */ 878 r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0, 879 &p->bo_list); 880 if (r) 881 return r; 882 } 883 884 mutex_lock(&p->bo_list->bo_list_mutex); 885 886 /* One for TTM and one for the CS job */ 887 amdgpu_bo_list_for_each_entry(e, p->bo_list) 888 e->tv.num_shared = 2; 889 890 amdgpu_bo_list_get_list(p->bo_list, &p->validated); 891 892 INIT_LIST_HEAD(&duplicates); 893 amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); 894 895 if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent) 896 list_add(&p->uf_entry.tv.head, &p->validated); 897 898 /* Get userptr backing pages. If pages are updated after registered 899 * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do 900 * amdgpu_ttm_backend_bind() to flush and invalidate new pages 901 */ 902 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { 903 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 904 bool userpage_invalidated = false; 905 int i; 906 907 e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages, 908 sizeof(struct page *), 909 GFP_KERNEL | __GFP_ZERO); 910 if (!e->user_pages) { 911 DRM_ERROR("kvmalloc_array failure\n"); 912 r = -ENOMEM; 913 goto out_free_user_pages; 914 } 915 916 r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages); 917 if (r) { 918 kvfree(e->user_pages); 919 e->user_pages = NULL; 920 goto out_free_user_pages; 921 } 922 923 for (i = 0; i < bo->tbo.ttm->num_pages; i++) { 924 if (bo->tbo.ttm->pages[i] != e->user_pages[i]) { 925 userpage_invalidated = true; 926 break; 927 } 928 } 929 e->user_invalidated = userpage_invalidated; 930 } 931 932 r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, 933 &duplicates); 934 if (unlikely(r != 0)) { 935 if (r != -ERESTARTSYS) 936 DRM_ERROR("ttm_eu_reserve_buffers failed.\n"); 937 goto out_free_user_pages; 938 } 939 940 amdgpu_bo_list_for_each_entry(e, p->bo_list) { 941 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 942 943 e->bo_va = amdgpu_vm_bo_find(vm, bo); 944 } 945 946 amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold, 947 &p->bytes_moved_vis_threshold); 948 p->bytes_moved = 0; 949 p->bytes_moved_vis = 0; 950 951 r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm, 952 amdgpu_cs_bo_validate, p); 953 if (r) { 954 DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n"); 955 goto error_validate; 956 } 957 958 r = amdgpu_cs_list_validate(p, &duplicates); 959 if (r) 960 goto error_validate; 961 962 r = amdgpu_cs_list_validate(p, &p->validated); 963 if (r) 964 goto error_validate; 965 966 if (p->uf_entry.tv.bo) { 967 struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo); 968 969 r = amdgpu_ttm_alloc_gart(&uf->tbo); 970 if (r) 971 goto error_validate; 972 973 p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf); 974 } 975 976 amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved, 977 p->bytes_moved_vis); 978 979 for (i = 0; i < p->gang_size; ++i) 980 amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj, 981 p->bo_list->gws_obj, 982 p->bo_list->oa_obj); 983 return 0; 984 985 error_validate: 986 ttm_eu_backoff_reservation(&p->ticket, &p->validated); 987 988 out_free_user_pages: 989 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { 990 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 991 992 if (!e->user_pages) 993 continue; 994 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); 995 kvfree(e->user_pages); 996 e->user_pages = NULL; 997 } 998 mutex_unlock(&p->bo_list->bo_list_mutex); 999 return r; 1000 } 1001 1002 static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p) 1003 { 1004 int i, j; 1005 1006 if (!trace_amdgpu_cs_enabled()) 1007 return; 1008 1009 for (i = 0; i < p->gang_size; ++i) { 1010 struct amdgpu_job *job = p->jobs[i]; 1011 1012 for (j = 0; j < job->num_ibs; ++j) 1013 trace_amdgpu_cs(p, job, &job->ibs[j]); 1014 } 1015 } 1016 1017 static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p, 1018 struct amdgpu_job *job) 1019 { 1020 struct amdgpu_ring *ring = amdgpu_job_ring(job); 1021 unsigned int i; 1022 int r; 1023 1024 /* Only for UVD/VCE VM emulation */ 1025 if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place) 1026 return 0; 1027 1028 for (i = 0; i < job->num_ibs; ++i) { 1029 struct amdgpu_ib *ib = &job->ibs[i]; 1030 struct amdgpu_bo_va_mapping *m; 1031 struct amdgpu_bo *aobj; 1032 uint64_t va_start; 1033 uint8_t *kptr; 1034 1035 va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK; 1036 r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m); 1037 if (r) { 1038 DRM_ERROR("IB va_start is invalid\n"); 1039 return r; 1040 } 1041 1042 if ((va_start + ib->length_dw * 4) > 1043 (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) { 1044 DRM_ERROR("IB va_start+ib_bytes is invalid\n"); 1045 return -EINVAL; 1046 } 1047 1048 /* the IB should be reserved at this point */ 1049 r = amdgpu_bo_kmap(aobj, (void **)&kptr); 1050 if (r) { 1051 return r; 1052 } 1053 1054 kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE); 1055 1056 if (ring->funcs->parse_cs) { 1057 memcpy(ib->ptr, kptr, ib->length_dw * 4); 1058 amdgpu_bo_kunmap(aobj); 1059 1060 r = amdgpu_ring_parse_cs(ring, p, job, ib); 1061 if (r) 1062 return r; 1063 } else { 1064 ib->ptr = (uint32_t *)kptr; 1065 r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib); 1066 amdgpu_bo_kunmap(aobj); 1067 if (r) 1068 return r; 1069 } 1070 } 1071 1072 return 0; 1073 } 1074 1075 static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p) 1076 { 1077 unsigned int i; 1078 int r; 1079 1080 for (i = 0; i < p->gang_size; ++i) { 1081 r = amdgpu_cs_patch_ibs(p, p->jobs[i]); 1082 if (r) 1083 return r; 1084 } 1085 return 0; 1086 } 1087 1088 static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) 1089 { 1090 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 1091 struct amdgpu_job *job = p->gang_leader; 1092 struct amdgpu_device *adev = p->adev; 1093 struct amdgpu_vm *vm = &fpriv->vm; 1094 struct amdgpu_bo_list_entry *e; 1095 struct amdgpu_bo_va *bo_va; 1096 struct amdgpu_bo *bo; 1097 unsigned int i; 1098 int r; 1099 1100 r = amdgpu_vm_clear_freed(adev, vm, NULL); 1101 if (r) 1102 return r; 1103 1104 r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false); 1105 if (r) 1106 return r; 1107 1108 r = amdgpu_sync_fence(&job->sync, fpriv->prt_va->last_pt_update); 1109 if (r) 1110 return r; 1111 1112 if (fpriv->csa_va) { 1113 bo_va = fpriv->csa_va; 1114 BUG_ON(!bo_va); 1115 r = amdgpu_vm_bo_update(adev, bo_va, false); 1116 if (r) 1117 return r; 1118 1119 r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update); 1120 if (r) 1121 return r; 1122 } 1123 1124 amdgpu_bo_list_for_each_entry(e, p->bo_list) { 1125 /* ignore duplicates */ 1126 bo = ttm_to_amdgpu_bo(e->tv.bo); 1127 if (!bo) 1128 continue; 1129 1130 bo_va = e->bo_va; 1131 if (bo_va == NULL) 1132 continue; 1133 1134 r = amdgpu_vm_bo_update(adev, bo_va, false); 1135 if (r) 1136 return r; 1137 1138 r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update); 1139 if (r) 1140 return r; 1141 } 1142 1143 r = amdgpu_vm_handle_moved(adev, vm); 1144 if (r) 1145 return r; 1146 1147 r = amdgpu_vm_update_pdes(adev, vm, false); 1148 if (r) 1149 return r; 1150 1151 r = amdgpu_sync_fence(&job->sync, vm->last_update); 1152 if (r) 1153 return r; 1154 1155 for (i = 0; i < p->gang_size; ++i) { 1156 job = p->jobs[i]; 1157 1158 if (!job->vm) 1159 continue; 1160 1161 job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo); 1162 } 1163 1164 if (amdgpu_vm_debug) { 1165 /* Invalidate all BOs to test for userspace bugs */ 1166 amdgpu_bo_list_for_each_entry(e, p->bo_list) { 1167 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 1168 1169 /* ignore duplicates */ 1170 if (!bo) 1171 continue; 1172 1173 amdgpu_vm_bo_invalidate(adev, bo, false); 1174 } 1175 } 1176 1177 return 0; 1178 } 1179 1180 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) 1181 { 1182 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 1183 struct amdgpu_job *leader = p->gang_leader; 1184 struct amdgpu_bo_list_entry *e; 1185 unsigned int i; 1186 int r; 1187 1188 list_for_each_entry(e, &p->validated, tv.head) { 1189 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 1190 struct dma_resv *resv = bo->tbo.base.resv; 1191 enum amdgpu_sync_mode sync_mode; 1192 1193 sync_mode = amdgpu_bo_explicit_sync(bo) ? 1194 AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER; 1195 r = amdgpu_sync_resv(p->adev, &leader->sync, resv, sync_mode, 1196 &fpriv->vm); 1197 if (r) 1198 return r; 1199 } 1200 1201 for (i = 0; i < p->gang_size; ++i) { 1202 if (p->jobs[i] == leader) 1203 continue; 1204 1205 r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync); 1206 if (r) 1207 return r; 1208 } 1209 1210 r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]); 1211 if (r && r != -ERESTARTSYS) 1212 DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n"); 1213 return r; 1214 } 1215 1216 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) 1217 { 1218 int i; 1219 1220 for (i = 0; i < p->num_post_deps; ++i) { 1221 if (p->post_deps[i].chain && p->post_deps[i].point) { 1222 drm_syncobj_add_point(p->post_deps[i].syncobj, 1223 p->post_deps[i].chain, 1224 p->fence, p->post_deps[i].point); 1225 p->post_deps[i].chain = NULL; 1226 } else { 1227 drm_syncobj_replace_fence(p->post_deps[i].syncobj, 1228 p->fence); 1229 } 1230 } 1231 } 1232 1233 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, 1234 union drm_amdgpu_cs *cs) 1235 { 1236 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 1237 struct amdgpu_job *leader = p->gang_leader; 1238 struct amdgpu_bo_list_entry *e; 1239 unsigned int i; 1240 uint64_t seq; 1241 int r; 1242 1243 for (i = 0; i < p->gang_size; ++i) 1244 drm_sched_job_arm(&p->jobs[i]->base); 1245 1246 for (i = 0; i < p->gang_size; ++i) { 1247 struct dma_fence *fence; 1248 1249 if (p->jobs[i] == leader) 1250 continue; 1251 1252 fence = &p->jobs[i]->base.s_fence->scheduled; 1253 r = amdgpu_sync_fence(&leader->sync, fence); 1254 if (r) 1255 goto error_cleanup; 1256 } 1257 1258 if (p->gang_size > 1) { 1259 for (i = 0; i < p->gang_size; ++i) 1260 amdgpu_job_set_gang_leader(p->jobs[i], leader); 1261 } 1262 1263 /* No memory allocation is allowed while holding the notifier lock. 1264 * The lock is held until amdgpu_cs_submit is finished and fence is 1265 * added to BOs. 1266 */ 1267 mutex_lock(&p->adev->notifier_lock); 1268 1269 /* If userptr are invalidated after amdgpu_cs_parser_bos(), return 1270 * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl. 1271 */ 1272 r = 0; 1273 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { 1274 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 1275 1276 r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); 1277 } 1278 if (r) { 1279 r = -EAGAIN; 1280 goto error_unlock; 1281 } 1282 1283 p->fence = dma_fence_get(&leader->base.s_fence->finished); 1284 list_for_each_entry(e, &p->validated, tv.head) { 1285 1286 /* Everybody except for the gang leader uses READ */ 1287 for (i = 0; i < p->gang_size; ++i) { 1288 if (p->jobs[i] == leader) 1289 continue; 1290 1291 dma_resv_add_fence(e->tv.bo->base.resv, 1292 &p->jobs[i]->base.s_fence->finished, 1293 DMA_RESV_USAGE_READ); 1294 } 1295 1296 /* The gang leader is remembered as writer */ 1297 e->tv.num_shared = 0; 1298 } 1299 1300 seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx], 1301 p->fence); 1302 amdgpu_cs_post_dependencies(p); 1303 1304 if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) && 1305 !p->ctx->preamble_presented) { 1306 leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; 1307 p->ctx->preamble_presented = true; 1308 } 1309 1310 cs->out.handle = seq; 1311 leader->uf_sequence = seq; 1312 1313 amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket); 1314 for (i = 0; i < p->gang_size; ++i) { 1315 amdgpu_job_free_resources(p->jobs[i]); 1316 trace_amdgpu_cs_ioctl(p->jobs[i]); 1317 drm_sched_entity_push_job(&p->jobs[i]->base); 1318 p->jobs[i] = NULL; 1319 } 1320 1321 amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm); 1322 ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); 1323 1324 mutex_unlock(&p->adev->notifier_lock); 1325 mutex_unlock(&p->bo_list->bo_list_mutex); 1326 return 0; 1327 1328 error_unlock: 1329 mutex_unlock(&p->adev->notifier_lock); 1330 1331 error_cleanup: 1332 for (i = 0; i < p->gang_size; ++i) 1333 drm_sched_job_cleanup(&p->jobs[i]->base); 1334 return r; 1335 } 1336 1337 /* Cleanup the parser structure */ 1338 static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser) 1339 { 1340 unsigned i; 1341 1342 for (i = 0; i < parser->num_post_deps; i++) { 1343 drm_syncobj_put(parser->post_deps[i].syncobj); 1344 kfree(parser->post_deps[i].chain); 1345 } 1346 kfree(parser->post_deps); 1347 1348 dma_fence_put(parser->fence); 1349 1350 if (parser->ctx) 1351 amdgpu_ctx_put(parser->ctx); 1352 if (parser->bo_list) 1353 amdgpu_bo_list_put(parser->bo_list); 1354 1355 for (i = 0; i < parser->nchunks; i++) 1356 kvfree(parser->chunks[i].kdata); 1357 kvfree(parser->chunks); 1358 for (i = 0; i < parser->gang_size; ++i) { 1359 if (parser->jobs[i]) 1360 amdgpu_job_free(parser->jobs[i]); 1361 } 1362 if (parser->uf_entry.tv.bo) { 1363 struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo); 1364 1365 amdgpu_bo_unref(&uf); 1366 } 1367 } 1368 1369 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 1370 { 1371 struct amdgpu_device *adev = drm_to_adev(dev); 1372 struct amdgpu_cs_parser parser; 1373 int r; 1374 1375 if (amdgpu_ras_intr_triggered()) 1376 return -EHWPOISON; 1377 1378 if (!adev->accel_working) 1379 return -EBUSY; 1380 1381 r = amdgpu_cs_parser_init(&parser, adev, filp, data); 1382 if (r) { 1383 if (printk_ratelimit()) 1384 DRM_ERROR("Failed to initialize parser %d!\n", r); 1385 return r; 1386 } 1387 1388 r = amdgpu_cs_pass1(&parser, data); 1389 if (r) 1390 goto error_fini; 1391 1392 r = amdgpu_cs_pass2(&parser); 1393 if (r) 1394 goto error_fini; 1395 1396 r = amdgpu_cs_parser_bos(&parser, data); 1397 if (r) { 1398 if (r == -ENOMEM) 1399 DRM_ERROR("Not enough memory for command submission!\n"); 1400 else if (r != -ERESTARTSYS && r != -EAGAIN) 1401 DRM_ERROR("Failed to process the buffer list %d!\n", r); 1402 goto error_fini; 1403 } 1404 1405 r = amdgpu_cs_patch_jobs(&parser); 1406 if (r) 1407 goto error_backoff; 1408 1409 r = amdgpu_cs_vm_handling(&parser); 1410 if (r) 1411 goto error_backoff; 1412 1413 r = amdgpu_cs_sync_rings(&parser); 1414 if (r) 1415 goto error_backoff; 1416 1417 trace_amdgpu_cs_ibs(&parser); 1418 1419 r = amdgpu_cs_submit(&parser, data); 1420 if (r) 1421 goto error_backoff; 1422 1423 amdgpu_cs_parser_fini(&parser); 1424 return 0; 1425 1426 error_backoff: 1427 ttm_eu_backoff_reservation(&parser.ticket, &parser.validated); 1428 mutex_unlock(&parser.bo_list->bo_list_mutex); 1429 1430 error_fini: 1431 amdgpu_cs_parser_fini(&parser); 1432 return r; 1433 } 1434 1435 /** 1436 * amdgpu_cs_wait_ioctl - wait for a command submission to finish 1437 * 1438 * @dev: drm device 1439 * @data: data from userspace 1440 * @filp: file private 1441 * 1442 * Wait for the command submission identified by handle to finish. 1443 */ 1444 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, 1445 struct drm_file *filp) 1446 { 1447 union drm_amdgpu_wait_cs *wait = data; 1448 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); 1449 struct drm_sched_entity *entity; 1450 struct amdgpu_ctx *ctx; 1451 struct dma_fence *fence; 1452 long r; 1453 1454 ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id); 1455 if (ctx == NULL) 1456 return -EINVAL; 1457 1458 r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance, 1459 wait->in.ring, &entity); 1460 if (r) { 1461 amdgpu_ctx_put(ctx); 1462 return r; 1463 } 1464 1465 fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle); 1466 if (IS_ERR(fence)) 1467 r = PTR_ERR(fence); 1468 else if (fence) { 1469 r = dma_fence_wait_timeout(fence, true, timeout); 1470 if (r > 0 && fence->error) 1471 r = fence->error; 1472 dma_fence_put(fence); 1473 } else 1474 r = 1; 1475 1476 amdgpu_ctx_put(ctx); 1477 if (r < 0) 1478 return r; 1479 1480 memset(wait, 0, sizeof(*wait)); 1481 wait->out.status = (r == 0); 1482 1483 return 0; 1484 } 1485 1486 /** 1487 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence 1488 * 1489 * @adev: amdgpu device 1490 * @filp: file private 1491 * @user: drm_amdgpu_fence copied from user space 1492 */ 1493 static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev, 1494 struct drm_file *filp, 1495 struct drm_amdgpu_fence *user) 1496 { 1497 struct drm_sched_entity *entity; 1498 struct amdgpu_ctx *ctx; 1499 struct dma_fence *fence; 1500 int r; 1501 1502 ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id); 1503 if (ctx == NULL) 1504 return ERR_PTR(-EINVAL); 1505 1506 r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance, 1507 user->ring, &entity); 1508 if (r) { 1509 amdgpu_ctx_put(ctx); 1510 return ERR_PTR(r); 1511 } 1512 1513 fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no); 1514 amdgpu_ctx_put(ctx); 1515 1516 return fence; 1517 } 1518 1519 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, 1520 struct drm_file *filp) 1521 { 1522 struct amdgpu_device *adev = drm_to_adev(dev); 1523 union drm_amdgpu_fence_to_handle *info = data; 1524 struct dma_fence *fence; 1525 struct drm_syncobj *syncobj; 1526 struct sync_file *sync_file; 1527 int fd, r; 1528 1529 fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence); 1530 if (IS_ERR(fence)) 1531 return PTR_ERR(fence); 1532 1533 if (!fence) 1534 fence = dma_fence_get_stub(); 1535 1536 switch (info->in.what) { 1537 case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ: 1538 r = drm_syncobj_create(&syncobj, 0, fence); 1539 dma_fence_put(fence); 1540 if (r) 1541 return r; 1542 r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle); 1543 drm_syncobj_put(syncobj); 1544 return r; 1545 1546 case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD: 1547 r = drm_syncobj_create(&syncobj, 0, fence); 1548 dma_fence_put(fence); 1549 if (r) 1550 return r; 1551 r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle); 1552 drm_syncobj_put(syncobj); 1553 return r; 1554 1555 case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD: 1556 fd = get_unused_fd_flags(O_CLOEXEC); 1557 if (fd < 0) { 1558 dma_fence_put(fence); 1559 return fd; 1560 } 1561 1562 sync_file = sync_file_create(fence); 1563 dma_fence_put(fence); 1564 if (!sync_file) { 1565 put_unused_fd(fd); 1566 return -ENOMEM; 1567 } 1568 1569 fd_install(fd, sync_file->file); 1570 info->out.handle = fd; 1571 return 0; 1572 1573 default: 1574 dma_fence_put(fence); 1575 return -EINVAL; 1576 } 1577 } 1578 1579 /** 1580 * amdgpu_cs_wait_all_fences - wait on all fences to signal 1581 * 1582 * @adev: amdgpu device 1583 * @filp: file private 1584 * @wait: wait parameters 1585 * @fences: array of drm_amdgpu_fence 1586 */ 1587 static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev, 1588 struct drm_file *filp, 1589 union drm_amdgpu_wait_fences *wait, 1590 struct drm_amdgpu_fence *fences) 1591 { 1592 uint32_t fence_count = wait->in.fence_count; 1593 unsigned int i; 1594 long r = 1; 1595 1596 for (i = 0; i < fence_count; i++) { 1597 struct dma_fence *fence; 1598 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns); 1599 1600 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]); 1601 if (IS_ERR(fence)) 1602 return PTR_ERR(fence); 1603 else if (!fence) 1604 continue; 1605 1606 r = dma_fence_wait_timeout(fence, true, timeout); 1607 dma_fence_put(fence); 1608 if (r < 0) 1609 return r; 1610 1611 if (r == 0) 1612 break; 1613 1614 if (fence->error) 1615 return fence->error; 1616 } 1617 1618 memset(wait, 0, sizeof(*wait)); 1619 wait->out.status = (r > 0); 1620 1621 return 0; 1622 } 1623 1624 /** 1625 * amdgpu_cs_wait_any_fence - wait on any fence to signal 1626 * 1627 * @adev: amdgpu device 1628 * @filp: file private 1629 * @wait: wait parameters 1630 * @fences: array of drm_amdgpu_fence 1631 */ 1632 static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev, 1633 struct drm_file *filp, 1634 union drm_amdgpu_wait_fences *wait, 1635 struct drm_amdgpu_fence *fences) 1636 { 1637 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns); 1638 uint32_t fence_count = wait->in.fence_count; 1639 uint32_t first = ~0; 1640 struct dma_fence **array; 1641 unsigned int i; 1642 long r; 1643 1644 /* Prepare the fence array */ 1645 array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL); 1646 1647 if (array == NULL) 1648 return -ENOMEM; 1649 1650 for (i = 0; i < fence_count; i++) { 1651 struct dma_fence *fence; 1652 1653 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]); 1654 if (IS_ERR(fence)) { 1655 r = PTR_ERR(fence); 1656 goto err_free_fence_array; 1657 } else if (fence) { 1658 array[i] = fence; 1659 } else { /* NULL, the fence has been already signaled */ 1660 r = 1; 1661 first = i; 1662 goto out; 1663 } 1664 } 1665 1666 r = dma_fence_wait_any_timeout(array, fence_count, true, timeout, 1667 &first); 1668 if (r < 0) 1669 goto err_free_fence_array; 1670 1671 out: 1672 memset(wait, 0, sizeof(*wait)); 1673 wait->out.status = (r > 0); 1674 wait->out.first_signaled = first; 1675 1676 if (first < fence_count && array[first]) 1677 r = array[first]->error; 1678 else 1679 r = 0; 1680 1681 err_free_fence_array: 1682 for (i = 0; i < fence_count; i++) 1683 dma_fence_put(array[i]); 1684 kfree(array); 1685 1686 return r; 1687 } 1688 1689 /** 1690 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish 1691 * 1692 * @dev: drm device 1693 * @data: data from userspace 1694 * @filp: file private 1695 */ 1696 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, 1697 struct drm_file *filp) 1698 { 1699 struct amdgpu_device *adev = drm_to_adev(dev); 1700 union drm_amdgpu_wait_fences *wait = data; 1701 uint32_t fence_count = wait->in.fence_count; 1702 struct drm_amdgpu_fence *fences_user; 1703 struct drm_amdgpu_fence *fences; 1704 int r; 1705 1706 /* Get the fences from userspace */ 1707 fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence), 1708 GFP_KERNEL); 1709 if (fences == NULL) 1710 return -ENOMEM; 1711 1712 fences_user = u64_to_user_ptr(wait->in.fences); 1713 if (copy_from_user(fences, fences_user, 1714 sizeof(struct drm_amdgpu_fence) * fence_count)) { 1715 r = -EFAULT; 1716 goto err_free_fences; 1717 } 1718 1719 if (wait->in.wait_all) 1720 r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences); 1721 else 1722 r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences); 1723 1724 err_free_fences: 1725 kfree(fences); 1726 1727 return r; 1728 } 1729 1730 /** 1731 * amdgpu_cs_find_mapping - find bo_va for VM address 1732 * 1733 * @parser: command submission parser context 1734 * @addr: VM address 1735 * @bo: resulting BO of the mapping found 1736 * @map: Placeholder to return found BO mapping 1737 * 1738 * Search the buffer objects in the command submission context for a certain 1739 * virtual memory address. Returns allocation structure when found, NULL 1740 * otherwise. 1741 */ 1742 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, 1743 uint64_t addr, struct amdgpu_bo **bo, 1744 struct amdgpu_bo_va_mapping **map) 1745 { 1746 struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; 1747 struct ttm_operation_ctx ctx = { false, false }; 1748 struct amdgpu_vm *vm = &fpriv->vm; 1749 struct amdgpu_bo_va_mapping *mapping; 1750 int r; 1751 1752 addr /= AMDGPU_GPU_PAGE_SIZE; 1753 1754 mapping = amdgpu_vm_bo_lookup_mapping(vm, addr); 1755 if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo) 1756 return -EINVAL; 1757 1758 *bo = mapping->bo_va->base.bo; 1759 *map = mapping; 1760 1761 /* Double check that the BO is reserved by this CS */ 1762 if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket) 1763 return -EINVAL; 1764 1765 if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) { 1766 (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; 1767 amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains); 1768 r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx); 1769 if (r) 1770 return r; 1771 } 1772 1773 return amdgpu_ttm_alloc_gart(&(*bo)->tbo); 1774 } 1775