/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"

static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	unsigned long size;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
	p->uf_entry.tv.shared = true;
	p->uf_entry.user_pages = NULL;

	size = amdgpu_bo_size(p->uf_entry.robj);
	if (size != PAGE_SIZE || (data->offset + 8) > size)
		return -EINVAL;

	*offset = data->offset;

	drm_gem_object_put_unlocked(gobj);

	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
		amdgpu_bo_unref(&p->uf_entry.robj);
		return -EINVAL;
	}

	return 0;
}

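/* Initialize the CS parser from the ioctl arguments: look up the context,
 * copy the user supplied chunk array into kernel memory, count the IB
 * chunks, record the user fence offset and allocate the job structure.
 */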
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_cs *cs = data;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	/* skip guilty context job */
	if (atomic_read(&p->ctx->guilty) == 1) {
		ret = -ECANCELED;
		goto free_chunk;
	}

	mutex_lock(&p->ctx->lock);

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.robj)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kfree(chunk_array);

	return ret;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

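/* Worked example with illustrative numbers (not taken from any particular
 * ASIC): if log2_max_MBps == 6 the assumed copy rate is 2^6 = 64 MB/s.
 * One second of accumulated budget (accum_us == 1000000) then allows
 * us_to_bytes() == 1000000 << 6 = 64000000 bytes (~64 MB) of buffer moves,
 * and bytes_to_us() is simply the inverse shift.
 */
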
/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}

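/* Validate a single BO against the per-submission move budget computed by
 * amdgpu_cs_get_threshold_for_moves(). The preferred domains are only used
 * while the budget has not been exhausted; otherwise the BO is validated in
 * its allowed domains so that no (optional) migration takes place.
 */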
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.resv,
		.flags = 0
	};
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_ttm_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
				struct amdgpu_bo *validated)
{
	uint32_t domain = validated->allowed_domains;
	struct ttm_operation_ctx ctx = { true, false };
	int r;

	if (!p->evictable)
		return false;

	for (;&p->evictable->tv.head != &p->validated;
	     p->evictable = list_prev_entry(p->evictable, tv.head)) {

		struct amdgpu_bo_list_entry *candidate = p->evictable;
		struct amdgpu_bo *bo = candidate->robj;
		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
		bool update_bytes_moved_vis;
		uint32_t other;

		/* If we reached our current BO we can forget it */
		if (candidate->robj == validated)
			break;

		/* We can't move pinned BOs here */
		if (bo->pin_count)
			continue;

		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

		/* Check if this BO is in one of the domains we need space for */
		if (!(other & domain))
			continue;

		/* Check if we can move this BO somewhere else */
		other = bo->allowed_domains & ~domain;
		if (!other)
			continue;

		/* Good we can try to move this BO somewhere else */
		update_bytes_moved_vis =
			!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			amdgpu_bo_in_cpu_visible_vram(bo);
		amdgpu_ttm_placement_from_domain(bo, other);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		p->bytes_moved += ctx.bytes_moved;
		if (update_bytes_moved_vis)
			p->bytes_moved_vis += ctx.bytes_moved;

		if (unlikely(r))
			break;

		p->evictable = list_prev_entry(p->evictable, tv.head);
		list_move(&candidate->tv.head, &p->validated);

		return true;
	}

	return false;
}

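/* Validation callback shared by the BO list and the page table walk: retry
 * validation as long as it fails with -ENOMEM and evicting something from
 * the current working set frees up space, then validate the shadow BO, too.
 */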
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_cs_parser *p = param;
	int r;

	do {
		r = amdgpu_cs_bo_validate(p, bo);
	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
	if (r)
		return r;

	if (bo->shadow)
		r = amdgpu_cs_bo_validate(p, bo->shadow);

	return r;
}

static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = lobj->robj;
		bool binding_userptr = false;
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		/* Check if we have user pages and nobody bound the BO already */
		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
		    lobj->user_pages) {
			amdgpu_ttm_placement_from_domain(bo,
							 AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;
			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
			binding_userptr = true;
		}

		if (p->evictable == lobj)
			p->evictable = NULL;

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		if (binding_userptr) {
			kvfree(lobj->user_pages);
			lobj->user_pages = NULL;
		}
	}
	return 0;
}

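/* Build and reserve the list of buffers used by the submission: collect the
 * BOs from the BO list and the VM page directory, grab the user pages for
 * userptr BOs (restarting the reservation if they were invalidated),
 * validate everything within the move budget and finally pick up the
 * GDS/GWS/OA objects.
 */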
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	unsigned i, tries = 10;
	struct amdgpu_bo *gds;
	struct amdgpu_bo *gws;
	struct amdgpu_bo *oa;
	int r;

	INIT_LIST_HEAD(&p->validated);

	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
	if (p->bo_list) {
		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
		if (p->bo_list->first_userptr != p->bo_list->num_entries)
			p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
	}

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.robj && !p->uf_entry.robj->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	while (1) {
		struct list_head need_pages;
		unsigned i;

		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
					   &duplicates);
		if (unlikely(r != 0)) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
			goto error_free_pages;
		}

		/* Without a BO list we don't have userptr BOs */
		if (!p->bo_list)
			break;

		INIT_LIST_HEAD(&need_pages);
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {
			struct amdgpu_bo *bo;

			e = &p->bo_list->array[i];
			bo = e->robj;

			if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
				 &e->user_invalidated) && e->user_pages) {

				/* We acquired a page array, but somebody
				 * invalidated it. Free it and try again
				 */
				release_pages(e->user_pages,
					      bo->tbo.ttm->num_pages);
				kvfree(e->user_pages);
				e->user_pages = NULL;
			}

			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
			    !e->user_pages) {
				list_del(&e->tv.head);
				list_add(&e->tv.head, &need_pages);

				amdgpu_bo_unreserve(e->robj);
			}
		}

		if (list_empty(&need_pages))
			break;

		/* Unreserve everything again. */
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

		/* We tried too many times, just abort */
		if (!--tries) {
			r = -EDEADLK;
			DRM_ERROR("deadlock in %s\n", __func__);
			goto error_free_pages;
		}

		/* Fill the page arrays for all userptrs. */
		list_for_each_entry(e, &need_pages, tv.head) {
			struct ttm_tt *ttm = e->robj->tbo.ttm;

			e->user_pages = kvmalloc_array(ttm->num_pages,
						       sizeof(struct page*),
						       GFP_KERNEL | __GFP_ZERO);
			if (!e->user_pages) {
				r = -ENOMEM;
				DRM_ERROR("calloc failure in %s\n", __func__);
				goto error_free_pages;
			}

			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
			if (r) {
				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
				kvfree(e->user_pages);
				e->user_pages = NULL;
				goto error_free_pages;
			}
		}

		/* And try again. */
		list_splice(&need_pages, &p->validated);
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;
	p->evictable = list_last_entry(&p->validated,
				       struct amdgpu_bo_list_entry,
				       tv.head);

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
		goto error_validate;
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	if (p->bo_list) {
		struct amdgpu_vm *vm = &fpriv->vm;
		unsigned i;

		gds = p->bo_list->gds_obj;
		gws = p->bo_list->gws_obj;
		oa = p->bo_list->oa_obj;
		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct amdgpu_bo *bo = p->bo_list->array[i].robj;

			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
		}
	} else {
		gds = p->adev->gds.gds_gfx_bo;
		gws = p->adev->gds.gws_gfx_bo;
		oa = p->adev->gds.oa_gfx_bo;
	}

	if (gds) {
		p->job->gds_base = amdgpu_bo_gpu_offset(gds);
		p->job->gds_size = amdgpu_bo_size(gds);
	}
	if (gws) {
		p->job->gws_base = amdgpu_bo_gpu_offset(gws);
		p->job->gws_size = amdgpu_bo_size(gws);
	}
	if (oa) {
		p->job->oa_base = amdgpu_bo_gpu_offset(oa);
		p->job->oa_size = amdgpu_bo_size(oa);
	}

	if (!r && p->uf_entry.robj) {
		struct amdgpu_bo *uf = p->uf_entry.robj;

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

error_free_pages:

	if (p->bo_list) {
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {
			e = &p->bo_list->array[i];

			if (!e->user_pages)
				continue;

			release_pages(e->user_pages,
				      e->robj->tbo.ttm->num_pages);
			kvfree(e->user_pages);
		}
	}

	return r;
}

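/* Make the job wait for the fences already attached to every reserved BO
 * (implicit synchronization), honouring each BO's explicit-sync flag.
 */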
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct reservation_object *resv = e->robj->tbo.resv;
		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
				     amdgpu_bo_explicit_sync(e->robj));

		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: indicator to back off the buffer reservation
 *
 * If error is set, then unvalidate the buffers, otherwise just free memory
 * used by the parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
{
	unsigned i;

	if (error && backoff)
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);

	for (i = 0; i < parser->num_post_dep_syncobjs; i++)
		drm_syncobj_put(parser->post_dep_syncobjs[i]);
	kfree(parser->post_dep_syncobjs);

	dma_fence_put(parser->fence);

	if (parser->ctx) {
		mutex_unlock(&parser->ctx->lock);
		amdgpu_ctx_put(parser->ctx);
	}
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	amdgpu_bo_unref(&parser->uf_entry.robj);
}

static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
{
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int i, r;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync,
			      fpriv->prt_va->last_pt_update, false);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		struct dma_fence *f;

		bo_va = fpriv->csa_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
		if (r)
			return r;
	}

	if (p->bo_list) {
		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct dma_fence *f;

			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			bo_va = p->bo_list->array[i].bo_va;
			if (bo_va == NULL)
				continue;

			r = amdgpu_vm_bo_update(adev, bo_va, false);
			if (r)
				return r;

			f = bo_va->last_pt_update;
			r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
			if (r)
				return r;
		}

	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
	if (r)
		return r;

	if (amdgpu_vm_debug && p->bo_list) {
		/* Invalidate all BOs to test for userspace bugs */
		for (i = 0; i < p->bo_list->num_entries; i++) {
			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return r;
}

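/* For rings whose command streams are still parsed in software (UVD/VCE VM
 * emulation), copy each IB out of its BO and run the per-ring parser on it.
 * Afterwards the VM page tables are brought up to date and the required
 * synchronization is added to the job.
 */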
static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
				 struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring = p->job->ring;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (p->job->ring->funcs->parse_cs) {
		unsigned i, j;

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			struct amdgpu_cs_chunk *chunk;
			uint64_t offset, va_start;
			struct amdgpu_ib *ib;
			uint8_t *kptr;

			chunk = &p->chunks[i];
			ib = &p->job->ibs[j];
			chunk_ib = chunk->kdata;

			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
				continue;

			va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
			if (r) {
				DRM_ERROR("IB va_start is invalid\n");
				return r;
			}

			if ((va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r) {
				return r;
			}

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += va_start - offset;

			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
			amdgpu_bo_kunmap(aobj);

			r = amdgpu_ring_parse_cs(ring, p, j);
			if (r)
				return r;

			j++;
		}
	}

	if (p->job->vm) {
		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);

		r = amdgpu_bo_vm_update_pte(p);
		if (r)
			return r;

		r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
		if (r)
			return r;
	}

	return amdgpu_cs_sync_rings(p);
}

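/* Walk the IB chunks, map each one onto a ring and fill in the matching
 * amdgpu_ib structure of the job. SR-IOV preemption rules are enforced for
 * GFX and user fences are rejected on rings that cannot support them.
 */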
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int i, j;
	int r, ce_preempt = 0, de_preempt = 0;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct amdgpu_ring *ring;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_queue_mgr_map(adev, &parser->ctx->queue_mgr, chunk_ib->ip_type,
					 chunk_ib->ip_instance, chunk_ib->ring, &ring);
		if (r)
			return r;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
			parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
			if (!parser->ctx->preamble_presented) {
				parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
				parser->ctx->preamble_presented = true;
			}
		}

		if (parser->job->ring && parser->job->ring != ring)
			return -EINVAL;

		parser->job->ring = ring;

		r = amdgpu_ib_get(adev, vm,
				  ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
				  ib);
		if (r) {
			DRM_ERROR("Failed to get ib !\n");
			return r;
		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;

		j++;
	}

	/* UVD & VCE fw doesn't support user fences */
	if (parser->job->uf_addr && (
	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
		return -EINVAL;

	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
}

static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_dep *deps;

	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ring *ring;
		struct amdgpu_ctx *ctx;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_queue_mgr_map(p->adev, &ctx->queue_mgr,
					 deps[i].ip_type,
					 deps[i].ip_instance,
					 deps[i].ring, &ring);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, ring,
					     deps[i].handle);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			amdgpu_ctx_put(ctx);
			return r;
		} else if (fence) {
			r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
					      true);
			dma_fence_put(fence);
			amdgpu_ctx_put(ctx);
			if (r)
				return r;
		}
	}
	return 0;
}

static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
						 uint32_t handle)
{
	int r;
	struct dma_fence *fence;
	r = drm_syncobj_find_fence(p->filp, handle, &fence);
	if (r)
		return r;

	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
	dma_fence_put(fence);

	return r;
}

static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
					    struct amdgpu_cs_chunk *chunk)
{
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_sem *deps;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
		if (r)
			return r;
	}
	return 0;
}

static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
					     struct amdgpu_cs_chunk *chunk)
{
	unsigned num_deps;
	int i;
	struct drm_amdgpu_cs_chunk_sem *deps;
	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	p->post_dep_syncobjs = kmalloc_array(num_deps,
					     sizeof(struct drm_syncobj *),
					     GFP_KERNEL);
	p->num_post_dep_syncobjs = 0;

	if (!p->post_dep_syncobjs)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_dep_syncobjs[i])
			return -EINVAL;
		p->num_post_dep_syncobjs++;
	}
	return 0;
}

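/* Process all dependency chunks of the submission: fence dependencies and
 * incoming syncobjs are added to the job's sync object, outgoing syncobjs
 * are collected so they can be signalled once the job's fence exists.
 */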
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				return r;
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
			if (r)
				return r;
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
			if (r)
				return r;
		}
	}

	return 0;
}

static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_dep_syncobjs; ++i)
		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
}

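/* Hand the finished job over to the GPU scheduler. The userptr BOs are
 * checked one last time under the MMU notifier lock, the scheduler fence
 * becomes the CS fence that is returned to userspace as a sequence number,
 * and the reserved buffers are fenced before the reservation is dropped.
 */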
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_ring *ring = p->job->ring;
	struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
	struct amdgpu_job *job;
	unsigned i;
	uint64_t seq;

	int r;

	amdgpu_mn_lock(p->mn);
	if (p->bo_list) {
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {
			struct amdgpu_bo *bo = p->bo_list->array[i].robj;

			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
				amdgpu_mn_unlock(p->mn);
				return -ERESTARTSYS;
			}
		}
	}

	job = p->job;
	p->job = NULL;

	r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
	if (r) {
		amdgpu_job_free(job);
		amdgpu_mn_unlock(p->mn);
		return r;
	}

	job->owner = p->filp;
	job->fence_ctx = entity->fence_context;
	p->fence = dma_fence_get(&job->base.s_fence->finished);

	r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
	if (r) {
		dma_fence_put(p->fence);
		dma_fence_put(&job->base.s_fence->finished);
		amdgpu_job_free(job);
		amdgpu_mn_unlock(p->mn);
		return r;
	}

	amdgpu_cs_post_dependencies(p);

	cs->out.handle = seq;
	job->uf_sequence = seq;

	amdgpu_job_free_resources(job);
	amdgpu_ring_priority_get(job->ring, job->base.s_priority);

	trace_amdgpu_cs_ioctl(job);
	drm_sched_entity_push_job(&job->base, entity);

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	amdgpu_mn_unlock(p->mn);

	return 0;
}

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		goto out;
	}

	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	struct amdgpu_device *adev = dev->dev_private;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct amdgpu_ring *ring = NULL;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
				 wait->in.ip_type, wait->in.ip_instance,
				 wait->in.ring, &ring);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct amdgpu_ring *ring;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, user->ip_type,
				 user->ip_instance, user->ring, &ring);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

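/* Convert a CS fence into a syncobj handle, a syncobj fd or a sync_file fd,
 * depending on what userspace asked for.
 */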
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		return -EINVAL;
	}
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;

		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: resulting mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns the BO and its mapping through @bo and
 * @map when found, an error code otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}