/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
		       u32 ip_instance, u32 ring,
		       struct amdgpu_ring **out_ring)
{
	/* Right now all IPs have only one instance - multiple rings. */
	if (ip_instance != 0) {
		DRM_ERROR("invalid ip instance: %d\n", ip_instance);
		return -EINVAL;
	}

	switch (ip_type) {
	default:
		DRM_ERROR("unknown ip type: %d\n", ip_type);
		return -EINVAL;
	case AMDGPU_HW_IP_GFX:
		if (ring < adev->gfx.num_gfx_rings) {
			*out_ring = &adev->gfx.gfx_ring[ring];
		} else {
			DRM_ERROR("only %d gfx rings are supported now\n",
				  adev->gfx.num_gfx_rings);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_COMPUTE:
		if (ring < adev->gfx.num_compute_rings) {
			*out_ring = &adev->gfx.compute_ring[ring];
		} else {
			DRM_ERROR("only %d compute rings are supported now\n",
				  adev->gfx.num_compute_rings);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_DMA:
		if (ring < adev->sdma.num_instances) {
			*out_ring = &adev->sdma.instance[ring].ring;
		} else {
			DRM_ERROR("only %d SDMA rings are supported\n",
				  adev->sdma.num_instances);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_UVD:
		*out_ring = &adev->uvd.ring;
		break;
	case AMDGPU_HW_IP_VCE:
		if (ring < 2) {
			*out_ring = &adev->vce.ring[ring];
		} else {
			DRM_ERROR("only two VCE rings are supported\n");
			return -EINVAL;
		}
		break;
	}
	return 0;
}
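
/*
 * Illustration only (not new driver logic): callers resolve a ring from the
 * (ip_type, ip_instance, ring) triple supplied by userspace, exactly as
 * amdgpu_cs_ib_fill() and amdgpu_cs_dependencies() do further down:
 *
 *	struct amdgpu_ring *ring;
 *	int r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
 *				   chunk_ib->ip_instance, chunk_ib->ring,
 *				   &ring);
 *	if (r)
 *		return r;
 */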

static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	unsigned long size;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
	p->uf_entry.tv.shared = true;
	p->uf_entry.user_pages = NULL;

	size = amdgpu_bo_size(p->uf_entry.robj);
	if (size != PAGE_SIZE || (data->offset + 8) > size)
		return -EINVAL;

	*offset = data->offset;

	drm_gem_object_unreference_unlocked(gobj);

	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
		amdgpu_bo_unref(&p->uf_entry.robj);
		return -EINVAL;
	}

	return 0;
}

int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_cs *cs = data;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	/* get chunks */
	chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto put_ctx;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto put_ctx;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->uf_entry.robj)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);
	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		drm_free_large(p->chunks[i].kdata);
	kfree(p->chunks);
put_ctx:
	amdgpu_ctx_put(p->ctx);
free_chunk:
	kfree(chunk_array);

	return ret;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}
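
/*
 * Worked example with illustrative numbers only: if log2_max_MBps == 6, one
 * accumulated microsecond is worth 1 << 6 = 64 bytes. accum_us grows by one
 * million per second of wall time, so that corresponds to a sustained budget
 * of roughly 64 * 10^6 bytes/s (~64 MB/s) of buffer migrations. The 200 ms
 * cap used below (us_upper_bound = 200000) then limits a single submission
 * to about 200000 << 6 = ~12.8 million bytes of moves.
 */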

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
{
	s64 time_us, increment_us;
	u64 max_bytes;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
	used_vram = atomic64_read(&adev->vram_usage);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This returns 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	spin_unlock(&adev->mm_stats.lock);
	return max_bytes;
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
					 u64 num_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	spin_unlock(&adev->mm_stats.lock);
}
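
/*
 * The helpers above are used together once per command submission, see
 * amdgpu_cs_parser_bos() below:
 *
 *	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
 *	p->bytes_moved = 0;
 *	... validate BOs, every migration adds to p->bytes_moved ...
 *	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);
 *
 * Going over the threshold is allowed once; the resulting debt simply
 * suppresses optional moves in later submissions until it is repaid.
 */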

static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u64 initial_bytes_moved;
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold)
		domain = bo->prefered_domains;
	else
		domain = bo->allowed_domains;

retry:
	amdgpu_ttm_placement_from_domain(bo, domain);
	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
		initial_bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
				struct amdgpu_bo *validated)
{
	uint32_t domain = validated->allowed_domains;
	int r;

	if (!p->evictable)
		return false;

	for (;&p->evictable->tv.head != &p->validated;
	     p->evictable = list_prev_entry(p->evictable, tv.head)) {

		struct amdgpu_bo_list_entry *candidate = p->evictable;
		struct amdgpu_bo *bo = candidate->robj;
		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
		u64 initial_bytes_moved;
		uint32_t other;

		/* If we reached our current BO we can forget it */
		if (candidate->robj == validated)
			break;

		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

		/* Check if this BO is in one of the domains we need space for */
		if (!(other & domain))
			continue;

		/* Check if we can move this BO somewhere else */
		other = bo->allowed_domains & ~domain;
		if (!other)
			continue;

		/* Good we can try to move this BO somewhere else */
		amdgpu_ttm_placement_from_domain(bo, other);
		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
			initial_bytes_moved;

		if (unlikely(r))
			break;

		p->evictable = list_prev_entry(p->evictable, tv.head);
		list_move(&candidate->tv.head, &p->validated);

		return true;
	}

	return false;
}

static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_cs_parser *p = param;
	int r;

	do {
		r = amdgpu_cs_bo_validate(p, bo);
	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
	if (r)
		return r;

	if (bo->shadow)
		r = amdgpu_cs_bo_validate(p, bo);

	return r;
}
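
/*
 * amdgpu_cs_validate() is the per-BO callback handed to
 * amdgpu_vm_validate_pt_bos() and used by amdgpu_cs_list_validate(): it
 * retries a failed (-ENOMEM) validation as long as amdgpu_cs_try_evict()
 * manages to push another BO of the current working set out of the
 * contended domain, and finally runs one more validation pass if the BO
 * has a shadow copy.
 */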

static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = lobj->robj;
		bool binding_userptr = false;
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		/* Check if we have user pages and nobody bound the BO already */
		if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
			size_t size = sizeof(struct page *);

			size *= bo->tbo.ttm->num_pages;
			memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
			binding_userptr = true;
		}

		if (p->evictable == lobj)
			p->evictable = NULL;

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		if (binding_userptr) {
			drm_free_large(lobj->user_pages);
			lobj->user_pages = NULL;
		}
	}
	return 0;
}
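
/*
 * Note on userptr handling in amdgpu_cs_parser_bos() below: all BOs are
 * reserved first, then every userptr BO whose pages were invalidated (or
 * never populated) is moved to a local need_pages list, the reservation is
 * backed off, the pages are (re)acquired with amdgpu_ttm_tt_get_user_pages()
 * and the whole loop is retried. After the retry budget (tries = 10) is
 * exhausted the submission is aborted with -EDEADLK.
 */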

static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	bool need_mmap_lock = false;
	unsigned i, tries = 10;
	int r;

	INIT_LIST_HEAD(&p->validated);

	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
	if (p->bo_list) {
		need_mmap_lock = p->bo_list->first_userptr !=
			p->bo_list->num_entries;
		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
	}

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.robj)
		list_add(&p->uf_entry.tv.head, &p->validated);

	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	while (1) {
		struct list_head need_pages;
		unsigned i;

		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
					   &duplicates);
		if (unlikely(r != 0)) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
			goto error_free_pages;
		}

		/* Without a BO list we don't have userptr BOs */
		if (!p->bo_list)
			break;

		INIT_LIST_HEAD(&need_pages);
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {

			e = &p->bo_list->array[i];

			if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
				 &e->user_invalidated) && e->user_pages) {

				/* We acquired a page array, but somebody
				 * invalidated it. Free it and try again
				 */
				release_pages(e->user_pages,
					      e->robj->tbo.ttm->num_pages,
					      false);
				drm_free_large(e->user_pages);
				e->user_pages = NULL;
			}

			if (e->robj->tbo.ttm->state != tt_bound &&
			    !e->user_pages) {
				list_del(&e->tv.head);
				list_add(&e->tv.head, &need_pages);

				amdgpu_bo_unreserve(e->robj);
			}
		}

		if (list_empty(&need_pages))
			break;

		/* Unreserve everything again. */
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

		/* We tried too many times, just abort */
		if (!--tries) {
			r = -EDEADLK;
			DRM_ERROR("deadlock in %s\n", __func__);
			goto error_free_pages;
		}

		/* Fill the page arrays for all userptrs. */
		list_for_each_entry(e, &need_pages, tv.head) {
			struct ttm_tt *ttm = e->robj->tbo.ttm;

			e->user_pages = drm_calloc_large(ttm->num_pages,
							 sizeof(struct page*));
			if (!e->user_pages) {
				r = -ENOMEM;
				DRM_ERROR("calloc failure in %s\n", __func__);
				goto error_free_pages;
			}

			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
			if (r) {
				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
				drm_free_large(e->user_pages);
				e->user_pages = NULL;
				goto error_free_pages;
			}
		}

		/* And try again. */
		list_splice(&need_pages, &p->validated);
	}

	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
	p->bytes_moved = 0;
	p->evictable = list_last_entry(&p->validated,
				       struct amdgpu_bo_list_entry,
				       tv.head);

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
		goto error_validate;
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);

	fpriv->vm.last_eviction_counter =
		atomic64_read(&p->adev->num_evictions);

	if (p->bo_list) {
		struct amdgpu_bo *gds = p->bo_list->gds_obj;
		struct amdgpu_bo *gws = p->bo_list->gws_obj;
		struct amdgpu_bo *oa = p->bo_list->oa_obj;
		struct amdgpu_vm *vm = &fpriv->vm;
		unsigned i;

		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct amdgpu_bo *bo = p->bo_list->array[i].robj;

			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
		}

		if (gds) {
			p->job->gds_base = amdgpu_bo_gpu_offset(gds);
			p->job->gds_size = amdgpu_bo_size(gds);
		}
		if (gws) {
			p->job->gws_base = amdgpu_bo_gpu_offset(gws);
			p->job->gws_size = amdgpu_bo_size(gws);
		}
		if (oa) {
			p->job->oa_base = amdgpu_bo_gpu_offset(oa);
			p->job->oa_size = amdgpu_bo_size(oa);
		}
	}

	if (!r && p->uf_entry.robj) {
		struct amdgpu_bo *uf = p->uf_entry.robj;

		r = amdgpu_ttm_bind(&uf->tbo, &uf->tbo.mem);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r) {
		amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
	}

error_free_pages:

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	if (p->bo_list) {
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {
			e = &p->bo_list->array[i];

			if (!e->user_pages)
				continue;

			release_pages(e->user_pages,
				      e->robj->tbo.ttm->num_pages,
				      false);
			drm_free_large(e->user_pages);
		}
	}

	return r;
}

static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct reservation_object *resv = e->robj->tbo.resv;
		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);

		if (r)
			return r;
	}
	return 0;
}

/**
 * cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 *
 * If error is set, then unvalidate the buffers, otherwise just free the
 * memory used by the parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	unsigned i;

	if (!error) {
		amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    parser->fence);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}
	dma_fence_put(parser->fence);

	if (parser->ctx)
		amdgpu_ctx_put(parser->ctx);
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	amdgpu_bo_unref(&parser->uf_entry.robj);
}

static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
				   struct amdgpu_vm *vm)
{
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int i, r;

	r = amdgpu_vm_update_page_directory(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync, vm->page_directory_fence);
	if (r)
		return r;

	r = amdgpu_vm_clear_freed(adev, vm);
	if (r)
		return r;

	if (p->bo_list) {
		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct dma_fence *f;

			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			bo_va = p->bo_list->array[i].bo_va;
			if (bo_va == NULL)
				continue;

			r = amdgpu_vm_bo_update(adev, bo_va, false);
			if (r)
				return r;

			f = bo_va->last_pt_update;
			r = amdgpu_sync_fence(adev, &p->job->sync, f);
			if (r)
				return r;
		}

	}

	r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync);

	if (amdgpu_vm_debug && p->bo_list) {
		/* Invalidate all BOs to test for userspace bugs */
		for (i = 0; i < p->bo_list->num_entries; i++) {
			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo);
		}
	}

	return r;
}

static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
				 struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring = p->job->ring;
	int i, r;

	/* Only for UVD/VCE VM emulation */
	if (ring->funcs->parse_cs) {
		for (i = 0; i < p->job->num_ibs; i++) {
			r = amdgpu_ring_parse_cs(ring, p, i);
			if (r)
				return r;
		}
	}

	if (p->job->vm) {
		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);

		r = amdgpu_bo_vm_update_pte(p, vm);
		if (r)
			return r;
	}

	return amdgpu_cs_sync_rings(p);
}

static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int i, j;
	int r;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct amdgpu_ring *ring;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
				       chunk_ib->ip_instance, chunk_ib->ring,
				       &ring);
		if (r)
			return r;

		if (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
			parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
			if (!parser->ctx->preamble_presented) {
				parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
				parser->ctx->preamble_presented = true;
			}
		}

		if (parser->job->ring && parser->job->ring != ring)
			return -EINVAL;

		parser->job->ring = ring;

		if (ring->funcs->parse_cs) {
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			uint64_t offset;
			uint8_t *kptr;

			m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
						   &aobj);
			if (!aobj) {
				DRM_ERROR("IB va_start is invalid\n");
				return -EINVAL;
			}

			if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
			    (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r) {
				return r;
			}

			offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
			kptr += chunk_ib->va_start - offset;

			r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}

			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
			amdgpu_bo_kunmap(aobj);
		} else {
			r = amdgpu_ib_get(adev, vm, 0, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}

		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;
		j++;
	}

	/* UVD & VCE fw doesn't support user fences */
	if (parser->job->uf_addr && (
	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
		return -EINVAL;

	return 0;
}
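
/*
 * A dependencies chunk is an array of struct drm_amdgpu_cs_chunk_dep
 * entries. Each entry names a fence by (ip_type, ip_instance, ring, ctx_id,
 * handle); below it is resolved with amdgpu_cs_get_ring() and
 * amdgpu_ctx_get_fence() and added to the job's sync object, so the
 * scheduler waits for it before running the IBs of this submission.
 */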

static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	int i, j, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct drm_amdgpu_cs_chunk_dep *deps;
		struct amdgpu_cs_chunk *chunk;
		unsigned num_deps;

		chunk = &p->chunks[i];

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
			continue;

		deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
		num_deps = chunk->length_dw * 4 /
			sizeof(struct drm_amdgpu_cs_chunk_dep);

		for (j = 0; j < num_deps; ++j) {
			struct amdgpu_ring *ring;
			struct amdgpu_ctx *ctx;
			struct dma_fence *fence;

			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
					       deps[j].ip_instance,
					       deps[j].ring, &ring);
			if (r)
				return r;

			ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
			if (ctx == NULL)
				return -EINVAL;

			fence = amdgpu_ctx_get_fence(ctx, ring,
						     deps[j].handle);
			if (IS_ERR(fence)) {
				r = PTR_ERR(fence);
				amdgpu_ctx_put(ctx);
				return r;

			} else if (fence) {
				r = amdgpu_sync_fence(adev, &p->job->sync,
						      fence);
				dma_fence_put(fence);
				amdgpu_ctx_put(ctx);
				if (r)
					return r;
			}
		}
	}

	return 0;
}

static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_ring *ring = p->job->ring;
	struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
	struct amdgpu_job *job;
	int r;

	job = p->job;
	p->job = NULL;

	r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
	if (r) {
		amdgpu_job_free(job);
		return r;
	}

	job->owner = p->filp;
	job->fence_ctx = entity->fence_context;
	p->fence = dma_fence_get(&job->base.s_fence->finished);
	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
	job->uf_sequence = cs->out.handle;
	amdgpu_job_free_resources(job);

	trace_amdgpu_cs_ioctl(job);
	amd_sched_entity_push_job(&job->base);

	return 0;
}
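
/*
 * Overview of the CS ioctl below:
 *
 *	amdgpu_cs_parser_init()  - copy in the chunk array, look up the context
 *	amdgpu_cs_parser_bos()   - build, reserve and validate the buffer list
 *	amdgpu_cs_ib_fill()      - turn IB chunks into amdgpu_ib structures
 *	amdgpu_cs_dependencies() - add fence dependencies to the job
 *	amdgpu_cs_ib_vm_chunk()  - parse UVD/VCE IBs, update VM page tables, sync
 *	amdgpu_cs_submit()       - hand the job over to the GPU scheduler
 *	amdgpu_cs_parser_fini()  - fence the buffers and clean up
 */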

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		goto out;
	}

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;
	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	struct amdgpu_device *adev = dev->dev_private;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct amdgpu_ring *ring = NULL;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
			       wait->in.ring, &ring);
	if (r)
		return r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct amdgpu_ring *ring;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
			       user->ring, &ring);
	if (r)
		return ERR_PTR(r);

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;
	/* set return value 0 to indicate success */
	r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = (void __user *)(unsigned long)(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns allocation structure when found, NULL
 * otherwise.
 */
struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo)
{
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i;

	if (!parser->bo_list)
		return NULL;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	for (i = 0; i < parser->bo_list->num_entries; i++) {
		struct amdgpu_bo_list_entry *lobj;

		lobj = &parser->bo_list->array[i];
		if (!lobj->bo_va)
			continue;

		list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
			if (mapping->it.start > addr ||
			    addr > mapping->it.last)
				continue;

			*bo = lobj->bo_va->bo;
			return mapping;
		}

		list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
			if (mapping->it.start > addr ||
			    addr > mapping->it.last)
				continue;

			*bo = lobj->bo_va->bo;
			return mapping;
		}
	}

	return NULL;
}

/**
 * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
 *
 * @parser: command submission parser context
 *
 * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
 */
int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
{
	unsigned i;
	int r;

	if (!parser->bo_list)
		return 0;

	for (i = 0; i < parser->bo_list->num_entries; i++) {
		struct amdgpu_bo *bo = parser->bo_list->array[i].robj;

		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
		if (unlikely(r))
			return r;

		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			continue;

		bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
		if (unlikely(r))
			return r;
	}

	return 0;
}