/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (!nvbo)
		return;
	nvbo->gem = NULL;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}

int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

static void
nouveau_gem_object_delete(void *data)
{
	struct nouveau_vma *vma = data;
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	kfree(vma);
}

static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct nouveau_fence *fence = NULL;

	list_del(&vma->head);

	if (mapped) {
		spin_lock(&nvbo->bo.bdev->fence_lock);
		if (nvbo->bo.sync_obj)
			fence = nouveau_fence_ref(nvbo->bo.sync_obj);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
	}

	if (fence) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		kfree(vma);
	}
	nouveau_fence_unref(&fence);
}

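/*
 * Called when a client closes its handle to the object: drop that client's
 * reference on the buffer's mapping in its VM and, once the refcount reaches
 * zero, tear the mapping down.  nouveau_gem_object_unmap() defers the actual
 * unmap behind the buffer's current fence if one is still attached.
 */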
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (vma) {
		if (--vma->refcount == 0)
			nouveau_gem_object_unmap(nvbo, vma);
	}
	ttm_bo_unreserve(&nvbo->bo);
}

int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (nv_device(drm->device)->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}

static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (cli->base.vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = nvbo->bo.addr_space_offset;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

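/*
 * DRM_NOUVEAU_GEM_NEW: allocate a buffer object on behalf of userspace.
 * The requested tile flags are validated with the fb subdev's
 * memtype_valid(), the object is created with nouveau_gem_new(), and a
 * handle plus the initial domain/offset/size information is returned.
 */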
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;

	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}

static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
	struct ww_acquire_ctx ticket;
};

static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
		   struct ww_acquire_ctx *ticket)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence, &op->ticket);
	validate_fini_list(&op->gart_list, fence, &op->ticket);
	validate_fini_list(&op->both_list, fence, &op->ticket);
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_no_ticket(op, fence);
	ww_acquire_fini(&op->ticket);
}

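/*
 * Reserve every buffer on the pushbuf's buffer list.  Reservations are
 * taken under a ww_acquire ticket, so if ttm_bo_reserve() returns -EDEADLK
 * the reservations held so far are dropped, the contended buffer is
 * acquired via the slowpath and the whole list is retried.  A buffer that
 * appears twice on the list is rejected with -EINVAL.
 */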
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	int trycnt = 0;
	int ret, i;
	struct nouveau_bo *res_bo = NULL;

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
		if (ret) {
			validate_fini_no_ticket(op, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				ww_acquire_done(&op->ticket);
				ww_acquire_fini(&op->ticket);
				drm_gem_object_unreference_unlocked(gem);
				if (ret != -ERESTARTSYS)
					NV_ERROR(cli, "fail reserve\n");
				return ret;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	return 0;
}

static int
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
	struct nouveau_fence *fence = NULL;
	int ret = 0;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	if (nvbo->bo.sync_obj)
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	if (fence) {
		ret = nouveau_fence_sync(fence, chan);
		nouveau_fence_unref(&fence);
	}

	return ret;
}

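/*
 * Validate one list of reserved buffers: synchronise the channel with any
 * fence still attached to the buffer, apply the placement requested in the
 * pushbuf entry, let TTM move the buffer if necessary, and on pre-NV50
 * chips copy the updated "presumed" offset/domain back to userspace.
 * Returns the number of entries whose presumed state changed (so the
 * caller knows relocations must be applied), or a negative error code.
 */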
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail pre-validate sync\n");
			return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(cli, "fail ttm_validate\n");
			return ret;
		}

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail post-validate sync\n");
			return ret;
		}

		if (nv_device(drm->device)->card_type < NV_50) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

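/*
 * Patch relocations into the buffers that contain them.  Only relocations
 * whose target buffer's presumed offset turned out to be stale
 * (presumed.valid cleared by validate_list() when the buffer moved) are
 * rewritten: the new value is built from the target's real offset (low or
 * high 32 bits, optionally OR'd with a domain-dependent constant), then the
 * containing buffer is kmapped, waited on until idle, and the word at
 * reloc_bo_offset is updated.
 */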
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_ERROR(cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_ERROR(cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}

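/*
 * DRM_NOUVEAU_GEM_PUSHBUF: the main command submission path.  Copies the
 * push and buffer lists from userspace, reserves and validates every
 * buffer, applies relocations if any presumed offsets were stale, then
 * submits the pushes through the IB ring, a call (chipset >= 0x25) or a
 * jump into the user buffer, and finally emits a fence that is attached
 * to each buffer before the reservations are dropped.
 */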
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_ERROR(cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_ERROR(cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

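/*
 * CPU_PREP (above) only waits for the buffer to become idle, so there is
 * no state for CPU_FINI to undo; it simply succeeds.
 */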
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}