1 /* 2 * Copyright (C) 2008 Ben Skeggs. 3 * All Rights Reserved. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining 6 * a copy of this software and associated documentation files (the 7 * "Software"), to deal in the Software without restriction, including 8 * without limitation the rights to use, copy, modify, merge, publish, 9 * distribute, sublicense, and/or sell copies of the Software, and to 10 * permit persons to whom the Software is furnished to do so, subject to 11 * the following conditions: 12 * 13 * The above copyright notice and this permission notice (including the 14 * next paragraph) shall be included in all copies or substantial 15 * portions of the Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE 21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 24 * 25 */ 26 27 #include "nouveau_drm.h" 28 #include "nouveau_dma.h" 29 #include "nouveau_fence.h" 30 #include "nouveau_abi16.h" 31 32 #include "nouveau_ttm.h" 33 #include "nouveau_gem.h" 34 35 void 36 nouveau_gem_object_del(struct drm_gem_object *gem) 37 { 38 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 39 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 40 struct ttm_buffer_object *bo = &nvbo->bo; 41 struct device *dev = drm->dev->dev; 42 int ret; 43 44 ret = pm_runtime_get_sync(dev); 45 if (WARN_ON(ret < 0 && ret != -EACCES)) 46 return; 47 48 if (gem->import_attach) 49 drm_prime_gem_destroy(gem, nvbo->bo.sg); 50 51 drm_gem_object_release(gem); 52 53 /* reset filp so nouveau_bo_del_ttm() can test for it */ 54 gem->filp = NULL; 55 ttm_bo_unref(&bo); 56 57 pm_runtime_mark_last_busy(dev); 58 pm_runtime_put_autosuspend(dev); 59 } 60 61 int 62 nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) 63 { 64 struct nouveau_cli *cli = nouveau_cli(file_priv); 65 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 66 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 67 struct nvkm_vma *vma; 68 struct device *dev = drm->dev->dev; 69 int ret; 70 71 if (!cli->vm) 72 return 0; 73 74 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); 75 if (ret) 76 return ret; 77 78 vma = nouveau_bo_vma_find(nvbo, cli->vm); 79 if (!vma) { 80 vma = kzalloc(sizeof(*vma), GFP_KERNEL); 81 if (!vma) { 82 ret = -ENOMEM; 83 goto out; 84 } 85 86 ret = pm_runtime_get_sync(dev); 87 if (ret < 0 && ret != -EACCES) 88 goto out; 89 90 ret = nouveau_bo_vma_add(nvbo, cli->vm, vma); 91 if (ret) 92 kfree(vma); 93 94 pm_runtime_mark_last_busy(dev); 95 pm_runtime_put_autosuspend(dev); 96 } else { 97 vma->refcount++; 98 } 99 100 out: 101 ttm_bo_unreserve(&nvbo->bo); 102 return ret; 103 } 104 105 static void 106 nouveau_gem_object_delete(void *data) 107 { 108 struct nvkm_vma *vma = data; 109 nvkm_vm_unmap(vma); 110 nvkm_vm_put(vma); 111 kfree(vma); 112 } 113 114 static void 115 nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma) 116 { 117 const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM; 118 struct reservation_object *resv = nvbo->bo.resv; 119 struct reservation_object_list *fobj; 120 struct fence *fence = NULL; 121 122 fobj = reservation_object_get_list(resv); 123 124 list_del(&vma->head); 125 126 if (fobj && fobj->shared_count > 1) 127 ttm_bo_wait(&nvbo->bo, true, false, false); 128 else if (fobj && fobj->shared_count == 1) 129 fence = rcu_dereference_protected(fobj->shared[0], 130 reservation_object_held(resv)); 131 else 132 fence = reservation_object_get_excl(nvbo->bo.resv); 133 134 if (fence && mapped) { 135 nouveau_fence_work(fence, nouveau_gem_object_delete, vma); 136 } else { 137 if (mapped) 138 nvkm_vm_unmap(vma); 139 nvkm_vm_put(vma); 140 kfree(vma); 141 } 142 } 143 144 void 145 nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) 146 { 147 struct nouveau_cli *cli = nouveau_cli(file_priv); 148 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 149 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 150 struct device *dev = drm->dev->dev; 151 struct nvkm_vma *vma; 152 int ret; 153 154 if (!cli->vm) 155 return; 156 157 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); 158 if (ret) 159 return; 160 161 vma = nouveau_bo_vma_find(nvbo, cli->vm); 162 if (vma) { 163 if (--vma->refcount == 0) { 164 ret = pm_runtime_get_sync(dev); 165 if (!WARN_ON(ret < 0 && ret != -EACCES)) { 166 nouveau_gem_object_unmap(nvbo, vma); 167 pm_runtime_mark_last_busy(dev); 168 pm_runtime_put_autosuspend(dev); 169 } 170 } 171 } 172 ttm_bo_unreserve(&nvbo->bo); 173 } 174 175 int 176 nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, 177 uint32_t tile_mode, uint32_t tile_flags, 178 struct nouveau_bo **pnvbo) 179 { 180 struct nouveau_drm *drm = nouveau_drm(dev); 181 struct nouveau_bo *nvbo; 182 u32 flags = 0; 183 int ret; 184 185 if (domain & NOUVEAU_GEM_DOMAIN_VRAM) 186 flags |= TTM_PL_FLAG_VRAM; 187 if (domain & NOUVEAU_GEM_DOMAIN_GART) 188 flags |= TTM_PL_FLAG_TT; 189 if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU) 190 flags |= TTM_PL_FLAG_SYSTEM; 191 192 if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) 193 flags |= TTM_PL_FLAG_UNCACHED; 194 195 ret = nouveau_bo_new(dev, size, align, flags, tile_mode, 196 tile_flags, NULL, NULL, pnvbo); 197 if (ret) 198 return ret; 199 nvbo = *pnvbo; 200 201 /* we restrict allowed domains on nv50+ to only the types 202 * that were requested at creation time. not possibly on 203 * earlier chips without busting the ABI. 204 */ 205 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | 206 NOUVEAU_GEM_DOMAIN_GART; 207 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) 208 nvbo->valid_domains &= domain; 209 210 /* Initialize the embedded gem-object. We return a single gem-reference 211 * to the caller, instead of a normal nouveau_bo ttm reference. */ 212 ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size); 213 if (ret) { 214 nouveau_bo_ref(NULL, pnvbo); 215 return -ENOMEM; 216 } 217 218 nvbo->bo.persistent_swap_storage = nvbo->gem.filp; 219 return 0; 220 } 221 222 static int 223 nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem, 224 struct drm_nouveau_gem_info *rep) 225 { 226 struct nouveau_cli *cli = nouveau_cli(file_priv); 227 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 228 struct nvkm_vma *vma; 229 230 if (nvbo->bo.mem.mem_type == TTM_PL_TT) 231 rep->domain = NOUVEAU_GEM_DOMAIN_GART; 232 else 233 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; 234 235 rep->offset = nvbo->bo.offset; 236 if (cli->vm) { 237 vma = nouveau_bo_vma_find(nvbo, cli->vm); 238 if (!vma) 239 return -EINVAL; 240 241 rep->offset = vma->offset; 242 } 243 244 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; 245 rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node); 246 rep->tile_mode = nvbo->tile_mode; 247 rep->tile_flags = nvbo->tile_flags; 248 return 0; 249 } 250 251 int 252 nouveau_gem_ioctl_new(struct drm_device *dev, void *data, 253 struct drm_file *file_priv) 254 { 255 struct nouveau_drm *drm = nouveau_drm(dev); 256 struct nouveau_cli *cli = nouveau_cli(file_priv); 257 struct nvkm_fb *pfb = nvxx_fb(&drm->device); 258 struct drm_nouveau_gem_new *req = data; 259 struct nouveau_bo *nvbo = NULL; 260 int ret = 0; 261 262 if (!pfb->memtype_valid(pfb, req->info.tile_flags)) { 263 NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags); 264 return -EINVAL; 265 } 266 267 ret = nouveau_gem_new(dev, req->info.size, req->align, 268 req->info.domain, req->info.tile_mode, 269 req->info.tile_flags, &nvbo); 270 if (ret) 271 return ret; 272 273 ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle); 274 if (ret == 0) { 275 ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info); 276 if (ret) 277 drm_gem_handle_delete(file_priv, req->info.handle); 278 } 279 280 /* drop reference from allocate - handle holds it now */ 281 drm_gem_object_unreference_unlocked(&nvbo->gem); 282 return ret; 283 } 284 285 static int 286 nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, 287 uint32_t write_domains, uint32_t valid_domains) 288 { 289 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 290 struct ttm_buffer_object *bo = &nvbo->bo; 291 uint32_t domains = valid_domains & nvbo->valid_domains & 292 (write_domains ? write_domains : read_domains); 293 uint32_t pref_flags = 0, valid_flags = 0; 294 295 if (!domains) 296 return -EINVAL; 297 298 if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) 299 valid_flags |= TTM_PL_FLAG_VRAM; 300 301 if (valid_domains & NOUVEAU_GEM_DOMAIN_GART) 302 valid_flags |= TTM_PL_FLAG_TT; 303 304 if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) && 305 bo->mem.mem_type == TTM_PL_VRAM) 306 pref_flags |= TTM_PL_FLAG_VRAM; 307 308 else if ((domains & NOUVEAU_GEM_DOMAIN_GART) && 309 bo->mem.mem_type == TTM_PL_TT) 310 pref_flags |= TTM_PL_FLAG_TT; 311 312 else if (domains & NOUVEAU_GEM_DOMAIN_VRAM) 313 pref_flags |= TTM_PL_FLAG_VRAM; 314 315 else 316 pref_flags |= TTM_PL_FLAG_TT; 317 318 nouveau_bo_placement_set(nvbo, pref_flags, valid_flags); 319 320 return 0; 321 } 322 323 struct validate_op { 324 struct list_head list; 325 struct ww_acquire_ctx ticket; 326 }; 327 328 static void 329 validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence, 330 struct drm_nouveau_gem_pushbuf_bo *pbbo) 331 { 332 struct nouveau_bo *nvbo; 333 struct drm_nouveau_gem_pushbuf_bo *b; 334 335 while (!list_empty(&op->list)) { 336 nvbo = list_entry(op->list.next, struct nouveau_bo, entry); 337 b = &pbbo[nvbo->pbbo_index]; 338 339 if (likely(fence)) 340 nouveau_bo_fence(nvbo, fence, !!b->write_domains); 341 342 if (unlikely(nvbo->validate_mapped)) { 343 ttm_bo_kunmap(&nvbo->kmap); 344 nvbo->validate_mapped = false; 345 } 346 347 list_del(&nvbo->entry); 348 nvbo->reserved_by = NULL; 349 ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket); 350 drm_gem_object_unreference_unlocked(&nvbo->gem); 351 } 352 } 353 354 static void 355 validate_fini(struct validate_op *op, struct nouveau_fence *fence, 356 struct drm_nouveau_gem_pushbuf_bo *pbbo) 357 { 358 validate_fini_no_ticket(op, fence, pbbo); 359 ww_acquire_fini(&op->ticket); 360 } 361 362 static int 363 validate_init(struct nouveau_channel *chan, struct drm_file *file_priv, 364 struct drm_nouveau_gem_pushbuf_bo *pbbo, 365 int nr_buffers, struct validate_op *op) 366 { 367 struct nouveau_cli *cli = nouveau_cli(file_priv); 368 struct drm_device *dev = chan->drm->dev; 369 int trycnt = 0; 370 int ret, i; 371 struct nouveau_bo *res_bo = NULL; 372 LIST_HEAD(gart_list); 373 LIST_HEAD(vram_list); 374 LIST_HEAD(both_list); 375 376 ww_acquire_init(&op->ticket, &reservation_ww_class); 377 retry: 378 if (++trycnt > 100000) { 379 NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__); 380 return -EINVAL; 381 } 382 383 for (i = 0; i < nr_buffers; i++) { 384 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i]; 385 struct drm_gem_object *gem; 386 struct nouveau_bo *nvbo; 387 388 gem = drm_gem_object_lookup(dev, file_priv, b->handle); 389 if (!gem) { 390 NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle); 391 ret = -ENOENT; 392 break; 393 } 394 nvbo = nouveau_gem_object(gem); 395 if (nvbo == res_bo) { 396 res_bo = NULL; 397 drm_gem_object_unreference_unlocked(gem); 398 continue; 399 } 400 401 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) { 402 NV_PRINTK(error, cli, "multiple instances of buffer %d on " 403 "validation list\n", b->handle); 404 drm_gem_object_unreference_unlocked(gem); 405 ret = -EINVAL; 406 break; 407 } 408 409 ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket); 410 if (ret) { 411 list_splice_tail_init(&vram_list, &op->list); 412 list_splice_tail_init(&gart_list, &op->list); 413 list_splice_tail_init(&both_list, &op->list); 414 validate_fini_no_ticket(op, NULL, NULL); 415 if (unlikely(ret == -EDEADLK)) { 416 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true, 417 &op->ticket); 418 if (!ret) 419 res_bo = nvbo; 420 } 421 if (unlikely(ret)) { 422 if (ret != -ERESTARTSYS) 423 NV_PRINTK(error, cli, "fail reserve\n"); 424 break; 425 } 426 } 427 428 b->user_priv = (uint64_t)(unsigned long)nvbo; 429 nvbo->reserved_by = file_priv; 430 nvbo->pbbo_index = i; 431 if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && 432 (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)) 433 list_add_tail(&nvbo->entry, &both_list); 434 else 435 if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) 436 list_add_tail(&nvbo->entry, &vram_list); 437 else 438 if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART) 439 list_add_tail(&nvbo->entry, &gart_list); 440 else { 441 NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n", 442 b->valid_domains); 443 list_add_tail(&nvbo->entry, &both_list); 444 ret = -EINVAL; 445 break; 446 } 447 if (nvbo == res_bo) 448 goto retry; 449 } 450 451 ww_acquire_done(&op->ticket); 452 list_splice_tail(&vram_list, &op->list); 453 list_splice_tail(&gart_list, &op->list); 454 list_splice_tail(&both_list, &op->list); 455 if (ret) 456 validate_fini(op, NULL, NULL); 457 return ret; 458 459 } 460 461 static int 462 validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli, 463 struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo, 464 uint64_t user_pbbo_ptr) 465 { 466 struct nouveau_drm *drm = chan->drm; 467 struct drm_nouveau_gem_pushbuf_bo __user *upbbo = 468 (void __force __user *)(uintptr_t)user_pbbo_ptr; 469 struct nouveau_bo *nvbo; 470 int ret, relocs = 0; 471 472 list_for_each_entry(nvbo, list, entry) { 473 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; 474 475 ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains, 476 b->write_domains, 477 b->valid_domains); 478 if (unlikely(ret)) { 479 NV_PRINTK(error, cli, "fail set_domain\n"); 480 return ret; 481 } 482 483 ret = nouveau_bo_validate(nvbo, true, false); 484 if (unlikely(ret)) { 485 if (ret != -ERESTARTSYS) 486 NV_PRINTK(error, cli, "fail ttm_validate\n"); 487 return ret; 488 } 489 490 ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true); 491 if (unlikely(ret)) { 492 if (ret != -ERESTARTSYS) 493 NV_PRINTK(error, cli, "fail post-validate sync\n"); 494 return ret; 495 } 496 497 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { 498 if (nvbo->bo.offset == b->presumed.offset && 499 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && 500 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || 501 (nvbo->bo.mem.mem_type == TTM_PL_TT && 502 b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART))) 503 continue; 504 505 if (nvbo->bo.mem.mem_type == TTM_PL_TT) 506 b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART; 507 else 508 b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM; 509 b->presumed.offset = nvbo->bo.offset; 510 b->presumed.valid = 0; 511 relocs++; 512 513 if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed, 514 &b->presumed, sizeof(b->presumed))) 515 return -EFAULT; 516 } 517 } 518 519 return relocs; 520 } 521 522 static int 523 nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, 524 struct drm_file *file_priv, 525 struct drm_nouveau_gem_pushbuf_bo *pbbo, 526 uint64_t user_buffers, int nr_buffers, 527 struct validate_op *op, int *apply_relocs) 528 { 529 struct nouveau_cli *cli = nouveau_cli(file_priv); 530 int ret; 531 532 INIT_LIST_HEAD(&op->list); 533 534 if (nr_buffers == 0) 535 return 0; 536 537 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); 538 if (unlikely(ret)) { 539 if (ret != -ERESTARTSYS) 540 NV_PRINTK(error, cli, "validate_init\n"); 541 return ret; 542 } 543 544 ret = validate_list(chan, cli, &op->list, pbbo, user_buffers); 545 if (unlikely(ret < 0)) { 546 if (ret != -ERESTARTSYS) 547 NV_PRINTK(error, cli, "validating bo list\n"); 548 validate_fini(op, NULL, NULL); 549 return ret; 550 } 551 *apply_relocs = ret; 552 return 0; 553 } 554 555 static inline void 556 u_free(void *addr) 557 { 558 if (!is_vmalloc_addr(addr)) 559 kfree(addr); 560 else 561 vfree(addr); 562 } 563 564 static inline void * 565 u_memcpya(uint64_t user, unsigned nmemb, unsigned size) 566 { 567 void *mem; 568 void __user *userptr = (void __force __user *)(uintptr_t)user; 569 570 size *= nmemb; 571 572 mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); 573 if (!mem) 574 mem = vmalloc(size); 575 if (!mem) 576 return ERR_PTR(-ENOMEM); 577 578 if (copy_from_user(mem, userptr, size)) { 579 u_free(mem); 580 return ERR_PTR(-EFAULT); 581 } 582 583 return mem; 584 } 585 586 static int 587 nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, 588 struct drm_nouveau_gem_pushbuf *req, 589 struct drm_nouveau_gem_pushbuf_bo *bo) 590 { 591 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; 592 int ret = 0; 593 unsigned i; 594 595 reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc)); 596 if (IS_ERR(reloc)) 597 return PTR_ERR(reloc); 598 599 for (i = 0; i < req->nr_relocs; i++) { 600 struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i]; 601 struct drm_nouveau_gem_pushbuf_bo *b; 602 struct nouveau_bo *nvbo; 603 uint32_t data; 604 605 if (unlikely(r->bo_index > req->nr_buffers)) { 606 NV_PRINTK(error, cli, "reloc bo index invalid\n"); 607 ret = -EINVAL; 608 break; 609 } 610 611 b = &bo[r->bo_index]; 612 if (b->presumed.valid) 613 continue; 614 615 if (unlikely(r->reloc_bo_index > req->nr_buffers)) { 616 NV_PRINTK(error, cli, "reloc container bo index invalid\n"); 617 ret = -EINVAL; 618 break; 619 } 620 nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv; 621 622 if (unlikely(r->reloc_bo_offset + 4 > 623 nvbo->bo.mem.num_pages << PAGE_SHIFT)) { 624 NV_PRINTK(error, cli, "reloc outside of bo\n"); 625 ret = -EINVAL; 626 break; 627 } 628 629 if (!nvbo->kmap.virtual) { 630 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, 631 &nvbo->kmap); 632 if (ret) { 633 NV_PRINTK(error, cli, "failed kmap for reloc\n"); 634 break; 635 } 636 nvbo->validate_mapped = true; 637 } 638 639 if (r->flags & NOUVEAU_GEM_RELOC_LOW) 640 data = b->presumed.offset + r->data; 641 else 642 if (r->flags & NOUVEAU_GEM_RELOC_HIGH) 643 data = (b->presumed.offset + r->data) >> 32; 644 else 645 data = r->data; 646 647 if (r->flags & NOUVEAU_GEM_RELOC_OR) { 648 if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART) 649 data |= r->tor; 650 else 651 data |= r->vor; 652 } 653 654 ret = ttm_bo_wait(&nvbo->bo, true, false, false); 655 if (ret) { 656 NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret); 657 break; 658 } 659 660 nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data); 661 } 662 663 u_free(reloc); 664 return ret; 665 } 666 667 int 668 nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, 669 struct drm_file *file_priv) 670 { 671 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 672 struct nouveau_cli *cli = nouveau_cli(file_priv); 673 struct nouveau_abi16_chan *temp; 674 struct nouveau_drm *drm = nouveau_drm(dev); 675 struct drm_nouveau_gem_pushbuf *req = data; 676 struct drm_nouveau_gem_pushbuf_push *push; 677 struct drm_nouveau_gem_pushbuf_bo *bo; 678 struct nouveau_channel *chan = NULL; 679 struct validate_op op; 680 struct nouveau_fence *fence = NULL; 681 int i, j, ret = 0, do_reloc = 0; 682 683 if (unlikely(!abi16)) 684 return -ENOMEM; 685 686 list_for_each_entry(temp, &abi16->channels, head) { 687 if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) { 688 chan = temp->chan; 689 break; 690 } 691 } 692 693 if (!chan) 694 return nouveau_abi16_put(abi16, -ENOENT); 695 696 req->vram_available = drm->gem.vram_available; 697 req->gart_available = drm->gem.gart_available; 698 if (unlikely(req->nr_push == 0)) 699 goto out_next; 700 701 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { 702 NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n", 703 req->nr_push, NOUVEAU_GEM_MAX_PUSH); 704 return nouveau_abi16_put(abi16, -EINVAL); 705 } 706 707 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { 708 NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n", 709 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); 710 return nouveau_abi16_put(abi16, -EINVAL); 711 } 712 713 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { 714 NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n", 715 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); 716 return nouveau_abi16_put(abi16, -EINVAL); 717 } 718 719 push = u_memcpya(req->push, req->nr_push, sizeof(*push)); 720 if (IS_ERR(push)) 721 return nouveau_abi16_put(abi16, PTR_ERR(push)); 722 723 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); 724 if (IS_ERR(bo)) { 725 u_free(push); 726 return nouveau_abi16_put(abi16, PTR_ERR(bo)); 727 } 728 729 /* Ensure all push buffers are on validate list */ 730 for (i = 0; i < req->nr_push; i++) { 731 if (push[i].bo_index >= req->nr_buffers) { 732 NV_PRINTK(error, cli, "push %d buffer not in list\n", i); 733 ret = -EINVAL; 734 goto out_prevalid; 735 } 736 } 737 738 /* Validate buffer list */ 739 ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, 740 req->nr_buffers, &op, &do_reloc); 741 if (ret) { 742 if (ret != -ERESTARTSYS) 743 NV_PRINTK(error, cli, "validate: %d\n", ret); 744 goto out_prevalid; 745 } 746 747 /* Apply any relocations that are required */ 748 if (do_reloc) { 749 ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo); 750 if (ret) { 751 NV_PRINTK(error, cli, "reloc apply: %d\n", ret); 752 goto out; 753 } 754 } 755 756 if (chan->dma.ib_max) { 757 ret = nouveau_dma_wait(chan, req->nr_push + 1, 16); 758 if (ret) { 759 NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret); 760 goto out; 761 } 762 763 for (i = 0; i < req->nr_push; i++) { 764 struct nouveau_bo *nvbo = (void *)(unsigned long) 765 bo[push[i].bo_index].user_priv; 766 767 nv50_dma_push(chan, nvbo, push[i].offset, 768 push[i].length); 769 } 770 } else 771 if (drm->device.info.chipset >= 0x25) { 772 ret = RING_SPACE(chan, req->nr_push * 2); 773 if (ret) { 774 NV_PRINTK(error, cli, "cal_space: %d\n", ret); 775 goto out; 776 } 777 778 for (i = 0; i < req->nr_push; i++) { 779 struct nouveau_bo *nvbo = (void *)(unsigned long) 780 bo[push[i].bo_index].user_priv; 781 782 OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2); 783 OUT_RING(chan, 0); 784 } 785 } else { 786 ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS)); 787 if (ret) { 788 NV_PRINTK(error, cli, "jmp_space: %d\n", ret); 789 goto out; 790 } 791 792 for (i = 0; i < req->nr_push; i++) { 793 struct nouveau_bo *nvbo = (void *)(unsigned long) 794 bo[push[i].bo_index].user_priv; 795 uint32_t cmd; 796 797 cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2); 798 cmd |= 0x20000000; 799 if (unlikely(cmd != req->suffix0)) { 800 if (!nvbo->kmap.virtual) { 801 ret = ttm_bo_kmap(&nvbo->bo, 0, 802 nvbo->bo.mem. 803 num_pages, 804 &nvbo->kmap); 805 if (ret) { 806 WIND_RING(chan); 807 goto out; 808 } 809 nvbo->validate_mapped = true; 810 } 811 812 nouveau_bo_wr32(nvbo, (push[i].offset + 813 push[i].length - 8) / 4, cmd); 814 } 815 816 OUT_RING(chan, 0x20000000 | 817 (nvbo->bo.offset + push[i].offset)); 818 OUT_RING(chan, 0); 819 for (j = 0; j < NOUVEAU_DMA_SKIPS; j++) 820 OUT_RING(chan, 0); 821 } 822 } 823 824 ret = nouveau_fence_new(chan, false, &fence); 825 if (ret) { 826 NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret); 827 WIND_RING(chan); 828 goto out; 829 } 830 831 out: 832 validate_fini(&op, fence, bo); 833 nouveau_fence_unref(&fence); 834 835 out_prevalid: 836 u_free(bo); 837 u_free(push); 838 839 out_next: 840 if (chan->dma.ib_max) { 841 req->suffix0 = 0x00000000; 842 req->suffix1 = 0x00000000; 843 } else 844 if (drm->device.info.chipset >= 0x25) { 845 req->suffix0 = 0x00020000; 846 req->suffix1 = 0x00000000; 847 } else { 848 req->suffix0 = 0x20000000 | 849 (chan->push.vma.offset + ((chan->dma.cur + 2) << 2)); 850 req->suffix1 = 0x00000000; 851 } 852 853 return nouveau_abi16_put(abi16, ret); 854 } 855 856 int 857 nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, 858 struct drm_file *file_priv) 859 { 860 struct drm_nouveau_gem_cpu_prep *req = data; 861 struct drm_gem_object *gem; 862 struct nouveau_bo *nvbo; 863 bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT); 864 bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE); 865 int ret; 866 867 gem = drm_gem_object_lookup(dev, file_priv, req->handle); 868 if (!gem) 869 return -ENOENT; 870 nvbo = nouveau_gem_object(gem); 871 872 if (no_wait) 873 ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY; 874 else { 875 long lret; 876 877 lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ); 878 if (!lret) 879 ret = -EBUSY; 880 else if (lret > 0) 881 ret = 0; 882 else 883 ret = lret; 884 } 885 nouveau_bo_sync_for_cpu(nvbo); 886 drm_gem_object_unreference_unlocked(gem); 887 888 return ret; 889 } 890 891 int 892 nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data, 893 struct drm_file *file_priv) 894 { 895 struct drm_nouveau_gem_cpu_fini *req = data; 896 struct drm_gem_object *gem; 897 struct nouveau_bo *nvbo; 898 899 gem = drm_gem_object_lookup(dev, file_priv, req->handle); 900 if (!gem) 901 return -ENOENT; 902 nvbo = nouveau_gem_object(gem); 903 904 nouveau_bo_sync_for_device(nvbo); 905 drm_gem_object_unreference_unlocked(gem); 906 return 0; 907 } 908 909 int 910 nouveau_gem_ioctl_info(struct drm_device *dev, void *data, 911 struct drm_file *file_priv) 912 { 913 struct drm_nouveau_gem_info *req = data; 914 struct drm_gem_object *gem; 915 int ret; 916 917 gem = drm_gem_object_lookup(dev, file_priv, req->handle); 918 if (!gem) 919 return -ENOENT; 920 921 ret = nouveau_gem_info(file_priv, gem, req); 922 drm_gem_object_unreference_unlocked(gem); 923 return ret; 924 } 925 926