/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>

void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES))
		return;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_put(&nvbo->bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES)
		goto out;

	ret = nouveau_vma_new(nvbo, vmm, &vma);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

struct nouveau_gem_object_unmap {
	struct nouveau_cli_work work;
	struct nouveau_vma *vma;
};

static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
	nouveau_fence_unref(&vma->fence);
	nouveau_vma_del(&vma);
}

static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
	struct nouveau_gem_object_unmap *work =
		container_of(w, typeof(*work), work);
	nouveau_gem_object_delete(work->vma);
	kfree(work);
}
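/*
 * Tear down a per-client VMA for a buffer object.  If a fence still
 * covers the mapping, the actual teardown is deferred to the client
 * workqueue via nouveau_gem_object_delete_work(); otherwise (or if the
 * work allocation fails) the VMA is destroyed synchronously.
 */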
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
	struct nouveau_gem_object_unmap *work;

	list_del_init(&vma->head);

	if (!fence) {
		nouveau_gem_object_delete(vma);
		return;
	}

	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
		nouveau_gem_object_delete(vma);
		return;
	}

	work->work.func = nouveau_gem_object_delete_work;
	work->vma = vma;
	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}

void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_vma_find(nvbo, vmm);
	if (vma) {
		if (--vma->refs == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
				pm_runtime_put_autosuspend(dev);
			}
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
		flags |= TTM_PL_FLAG_UNCACHED;

	nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		return ret;
	}

	ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		return ret;
	}

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
	*pnvbo = nvbo;
	return 0;
}
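/*
 * Fill in a drm_nouveau_gem_info reply for userspace: the buffer's
 * valid domains, its GPU virtual address (or the raw TTM offset on
 * chips without a per-client VM), the mmap() handle, size and tiling
 * state.
 */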
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->bo.offset;
	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
		vma = nouveau_vma_find(nvbo, vmm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->addr;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
	rep->tile_mode = nvbo->mode;
	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
		rep->tile_flags |= nvbo->kind << 8;
	else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
	else
		rep->tile_flags |= nvbo->zeta;
	return 0;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	ret = nouveau_gem_new(cli, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
				    &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&nvbo->bo.base);
	return ret;
}
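/*
 * Compute TTM placement flags for a pushbuf buffer from the domains
 * userspace supplied.  The preferred placement keeps the buffer where
 * it currently resides whenever that domain is acceptable, to avoid
 * needless migration during validation.
 */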
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
			struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence)) {
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
				struct nouveau_vma *vma =
					(void *)(unsigned long)b->user_priv;
				nouveau_fence_unref(&vma->fence);
				dma_fence_get(&fence->base);
				vma->fence = fence;
			}
		}

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_put_unlocked(&nvbo->bo.base);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
	      struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, chan, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}
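/*
 * Reserve every buffer on the pushbuf's validation list under a single
 * ww_acquire ticket.  On -EDEADLK the buffers reserved so far are
 * released, the contended one is re-acquired via the slowpath, and the
 * whole list is retried, which is the standard wound/wait backoff.
 */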
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int trycnt = 0;
	int ret = -EINVAL, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_put_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_put_unlocked(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, chan, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(err, cli, "fail reserve\n");
				break;
			}
		}

		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
			struct nouveau_vmm *vmm = chan->vmm;
			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
			if (!vma) {
				NV_PRINTK(err, cli, "vma not found!\n");
				ret = -EINVAL;
				break;
			}

			b->user_priv = (uint64_t)(unsigned long)vma;
		} else {
			b->user_priv = (uint64_t)(unsigned long)nvbo;
		}

		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, chan, NULL, NULL);
	return ret;

}
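/*
 * Validate each reserved buffer: apply the requested placement, let TTM
 * migrate the buffer if necessary, synchronise it against the target
 * channel, and on pre-NV50 chips push updated "presumed" offsets back
 * to userspace.  Returns the number of buffers whose presumed state
 * changed, i.e. how many relocations will need to be applied.
 */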
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(err, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
					 &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validating bo list\n");
		validate_fini(op, chan, NULL, NULL);
		return ret;
	}
	*apply_relocs = ret;
	return 0;
}

static inline void
u_free(void *addr)
{
	kvfree(addr);
}

static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kvmalloc(size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_PRINTK(err, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(err, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		ret = ttm_bo_wait(&nvbo->bo, false, false);
		if (ret) {
			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	u_free(reloc);
	return ret;
}
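/*
 * DRM_IOCTL_NOUVEAU_GEM_PUSHBUF: the main command submission path.
 * Userspace hands in an array of buffer objects, an array of push
 * segments that index into those buffers, and (for pre-NV50 chips) a
 * relocation table.  Roughly, a caller drives it like the illustrative
 * userspace-side sketch below (via libdrm; fd, chan_id, nr_bo, bo_array
 * and push are placeholder variables, not part of this file):
 *
 *	struct drm_nouveau_gem_pushbuf req = {
 *		.channel    = chan_id,
 *		.nr_buffers = nr_bo,
 *		.buffers    = (uintptr_t)bo_array,
 *		.nr_push    = 1,
 *		.push       = (uintptr_t)&push,
 *	};
 *	drmCommandWriteRead(fd, DRM_NOUVEAU_GEM_PUSHBUF, &req, sizeof(req));
 *
 * The handler validates the buffer list, applies relocations if any
 * presumed offsets went stale, submits the push segments using the
 * method appropriate for the chipset, and fences every buffer involved
 * before returning.
 */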
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->chid == req->channel) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}
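	/*
	 * Submission proper, chosen by channel/chipset capability: IB-mode
	 * channels (chan->dma.ib_max) queue each segment's GPU virtual
	 * address through nv50_dma_push(); chipsets >= 0x25 write the
	 * segment addresses straight into the ring; older chips additionally
	 * patch a jump back to the main ring into the tail of each user
	 * pushbuf (unless userspace already placed the expected suffix0
	 * there) before jumping into it.
	 */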
	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_vma *vma = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, vma->addr + push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, chan, fence, bo);
	nouveau_fence_unref(&fence);

out_prevalid:
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	long lret;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
					 no_wait ? 0 : 30 * HZ);
	if (!lret)
		ret = -EBUSY;
	else if (lret > 0)
		ret = 0;
	else
		ret = lret;

	nouveau_bo_sync_for_cpu(nvbo);
	drm_gem_object_put_unlocked(gem);

	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_fini *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	nouveau_bo_sync_for_device(nvbo);
	drm_gem_object_put_unlocked(gem);
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_put_unlocked(gem);
	return ret;
}