1 /* 2 * Copyright 2007 Dave Airlied 3 * All Rights Reserved. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice (including the next 13 * paragraph) shall be included in all copies or substantial portions of the 14 * Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 */ 24 /* 25 * Authors: Dave Airlied <airlied@linux.ie> 26 * Ben Skeggs <darktama@iinet.net.au> 27 * Jeremy Kolb <jkolb@brandeis.edu> 28 */ 29 30 #include <linux/dma-mapping.h> 31 32 #include "nouveau_drv.h" 33 #include "nouveau_chan.h" 34 #include "nouveau_fence.h" 35 36 #include "nouveau_bo.h" 37 #include "nouveau_ttm.h" 38 #include "nouveau_gem.h" 39 #include "nouveau_mem.h" 40 #include "nouveau_vmm.h" 41 42 #include <nvif/class.h> 43 #include <nvif/if500b.h> 44 #include <nvif/if900b.h> 45 46 static int nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm, 47 struct ttm_resource *reg); 48 static void nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm); 49 50 /* 51 * NV10-NV40 tiling helpers 52 */ 53 54 static void 55 nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg, 56 u32 addr, u32 size, u32 pitch, u32 flags) 57 { 58 struct nouveau_drm *drm = nouveau_drm(dev); 59 int i = reg - drm->tile.reg; 60 struct nvkm_fb *fb = nvxx_fb(&drm->client.device); 61 struct nvkm_fb_tile *tile = &fb->tile.region[i]; 62 63 nouveau_fence_unref(®->fence); 64 65 if (tile->pitch) 66 nvkm_fb_tile_fini(fb, i, tile); 67 68 if (pitch) 69 nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile); 70 71 nvkm_fb_tile_prog(fb, i, tile); 72 } 73 74 static struct nouveau_drm_tile * 75 nv10_bo_get_tile_region(struct drm_device *dev, int i) 76 { 77 struct nouveau_drm *drm = nouveau_drm(dev); 78 struct nouveau_drm_tile *tile = &drm->tile.reg[i]; 79 80 spin_lock(&drm->tile.lock); 81 82 if (!tile->used && 83 (!tile->fence || nouveau_fence_done(tile->fence))) 84 tile->used = true; 85 else 86 tile = NULL; 87 88 spin_unlock(&drm->tile.lock); 89 return tile; 90 } 91 92 static void 93 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile, 94 struct dma_fence *fence) 95 { 96 struct nouveau_drm *drm = nouveau_drm(dev); 97 98 if (tile) { 99 spin_lock(&drm->tile.lock); 100 tile->fence = (struct nouveau_fence *)dma_fence_get(fence); 101 tile->used = false; 102 spin_unlock(&drm->tile.lock); 103 } 104 } 105 106 static struct nouveau_drm_tile * 107 nv10_bo_set_tiling(struct drm_device *dev, u32 addr, 108 u32 size, u32 pitch, u32 zeta) 109 { 110 struct nouveau_drm *drm = nouveau_drm(dev); 111 struct nvkm_fb *fb = nvxx_fb(&drm->client.device); 112 struct nouveau_drm_tile *tile, *found = NULL; 113 int i; 114 115 for (i = 0; i < fb->tile.regions; i++) { 116 tile = nv10_bo_get_tile_region(dev, i); 117 118 if (pitch && !found) { 119 found = tile; 120 continue; 121 122 } else if (tile && fb->tile.region[i].pitch) { 123 /* Kill an unused tile region. */ 124 nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0); 125 } 126 127 nv10_bo_put_tile_region(dev, tile, NULL); 128 } 129 130 if (found) 131 nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta); 132 return found; 133 } 134 135 static void 136 nouveau_bo_del_ttm(struct ttm_buffer_object *bo) 137 { 138 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 139 struct drm_device *dev = drm->dev; 140 struct nouveau_bo *nvbo = nouveau_bo(bo); 141 142 WARN_ON(nvbo->bo.pin_count > 0); 143 nouveau_bo_del_io_reserve_lru(bo); 144 nv10_bo_put_tile_region(dev, nvbo->tile, NULL); 145 146 /* 147 * If nouveau_bo_new() allocated this buffer, the GEM object was never 148 * initialized, so don't attempt to release it. 149 */ 150 if (bo->base.dev) 151 drm_gem_object_release(&bo->base); 152 else 153 dma_resv_fini(&bo->base._resv); 154 155 kfree(nvbo); 156 } 157 158 static inline u64 159 roundup_64(u64 x, u32 y) 160 { 161 x += y - 1; 162 do_div(x, y); 163 return x * y; 164 } 165 166 static void 167 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size) 168 { 169 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 170 struct nvif_device *device = &drm->client.device; 171 172 if (device->info.family < NV_DEVICE_INFO_V0_TESLA) { 173 if (nvbo->mode) { 174 if (device->info.chipset >= 0x40) { 175 *align = 65536; 176 *size = roundup_64(*size, 64 * nvbo->mode); 177 178 } else if (device->info.chipset >= 0x30) { 179 *align = 32768; 180 *size = roundup_64(*size, 64 * nvbo->mode); 181 182 } else if (device->info.chipset >= 0x20) { 183 *align = 16384; 184 *size = roundup_64(*size, 64 * nvbo->mode); 185 186 } else if (device->info.chipset >= 0x10) { 187 *align = 16384; 188 *size = roundup_64(*size, 32 * nvbo->mode); 189 } 190 } 191 } else { 192 *size = roundup_64(*size, (1 << nvbo->page)); 193 *align = max((1 << nvbo->page), *align); 194 } 195 196 *size = roundup_64(*size, PAGE_SIZE); 197 } 198 199 struct nouveau_bo * 200 nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain, 201 u32 tile_mode, u32 tile_flags) 202 { 203 struct nouveau_drm *drm = cli->drm; 204 struct nouveau_bo *nvbo; 205 struct nvif_mmu *mmu = &cli->mmu; 206 struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm; 207 int i, pi = -1; 208 209 if (!*size) { 210 NV_WARN(drm, "skipped size %016llx\n", *size); 211 return ERR_PTR(-EINVAL); 212 } 213 214 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); 215 if (!nvbo) 216 return ERR_PTR(-ENOMEM); 217 INIT_LIST_HEAD(&nvbo->head); 218 INIT_LIST_HEAD(&nvbo->entry); 219 INIT_LIST_HEAD(&nvbo->vma_list); 220 nvbo->bo.bdev = &drm->ttm.bdev; 221 222 /* This is confusing, and doesn't actually mean we want an uncached 223 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated 224 * into in nouveau_gem_new(). 225 */ 226 if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) { 227 /* Determine if we can get a cache-coherent map, forcing 228 * uncached mapping if we can't. 229 */ 230 if (!nouveau_drm_use_coherent_gpu_mapping(drm)) 231 nvbo->force_coherent = true; 232 } 233 234 if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) { 235 nvbo->kind = (tile_flags & 0x0000ff00) >> 8; 236 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { 237 kfree(nvbo); 238 return ERR_PTR(-EINVAL); 239 } 240 241 nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind; 242 } else 243 if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { 244 nvbo->kind = (tile_flags & 0x00007f00) >> 8; 245 nvbo->comp = (tile_flags & 0x00030000) >> 16; 246 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { 247 kfree(nvbo); 248 return ERR_PTR(-EINVAL); 249 } 250 } else { 251 nvbo->zeta = (tile_flags & 0x00000007); 252 } 253 nvbo->mode = tile_mode; 254 nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG); 255 256 /* Determine the desirable target GPU page size for the buffer. */ 257 for (i = 0; i < vmm->page_nr; i++) { 258 /* Because we cannot currently allow VMM maps to fail 259 * during buffer migration, we need to determine page 260 * size for the buffer up-front, and pre-allocate its 261 * page tables. 262 * 263 * Skip page sizes that can't support needed domains. 264 */ 265 if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE && 266 (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram) 267 continue; 268 if ((domain & NOUVEAU_GEM_DOMAIN_GART) && 269 (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT)) 270 continue; 271 272 /* Select this page size if it's the first that supports 273 * the potential memory domains, or when it's compatible 274 * with the requested compression settings. 275 */ 276 if (pi < 0 || !nvbo->comp || vmm->page[i].comp) 277 pi = i; 278 279 /* Stop once the buffer is larger than the current page size. */ 280 if (*size >= 1ULL << vmm->page[i].shift) 281 break; 282 } 283 284 if (WARN_ON(pi < 0)) 285 return ERR_PTR(-EINVAL); 286 287 /* Disable compression if suitable settings couldn't be found. */ 288 if (nvbo->comp && !vmm->page[pi].comp) { 289 if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100) 290 nvbo->kind = mmu->kind[nvbo->kind]; 291 nvbo->comp = 0; 292 } 293 nvbo->page = vmm->page[pi].shift; 294 295 nouveau_bo_fixup_align(nvbo, align, size); 296 297 return nvbo; 298 } 299 300 int 301 nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain, 302 struct sg_table *sg, struct dma_resv *robj) 303 { 304 int type = sg ? ttm_bo_type_sg : ttm_bo_type_device; 305 int ret; 306 307 nouveau_bo_placement_set(nvbo, domain, 0); 308 INIT_LIST_HEAD(&nvbo->io_reserve_lru); 309 310 ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type, 311 &nvbo->placement, align >> PAGE_SHIFT, false, sg, 312 robj, nouveau_bo_del_ttm); 313 if (ret) { 314 /* ttm will call nouveau_bo_del_ttm if it fails.. */ 315 return ret; 316 } 317 318 return 0; 319 } 320 321 int 322 nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, 323 uint32_t domain, uint32_t tile_mode, uint32_t tile_flags, 324 struct sg_table *sg, struct dma_resv *robj, 325 struct nouveau_bo **pnvbo) 326 { 327 struct nouveau_bo *nvbo; 328 int ret; 329 330 nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode, 331 tile_flags); 332 if (IS_ERR(nvbo)) 333 return PTR_ERR(nvbo); 334 335 nvbo->bo.base.size = size; 336 dma_resv_init(&nvbo->bo.base._resv); 337 drm_vma_node_reset(&nvbo->bo.base.vma_node); 338 339 ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj); 340 if (ret) 341 return ret; 342 343 *pnvbo = nvbo; 344 return 0; 345 } 346 347 static void 348 set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain) 349 { 350 *n = 0; 351 352 if (domain & NOUVEAU_GEM_DOMAIN_VRAM) { 353 pl[*n].mem_type = TTM_PL_VRAM; 354 pl[*n].flags = 0; 355 (*n)++; 356 } 357 if (domain & NOUVEAU_GEM_DOMAIN_GART) { 358 pl[*n].mem_type = TTM_PL_TT; 359 pl[*n].flags = 0; 360 (*n)++; 361 } 362 if (domain & NOUVEAU_GEM_DOMAIN_CPU) { 363 pl[*n].mem_type = TTM_PL_SYSTEM; 364 pl[(*n)++].flags = 0; 365 } 366 } 367 368 static void 369 set_placement_range(struct nouveau_bo *nvbo, uint32_t domain) 370 { 371 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 372 u64 vram_size = drm->client.device.info.ram_size; 373 unsigned i, fpfn, lpfn; 374 375 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && 376 nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) && 377 nvbo->bo.base.size < vram_size / 4) { 378 /* 379 * Make sure that the color and depth buffers are handled 380 * by independent memory controller units. Up to a 9x 381 * speed up when alpha-blending and depth-test are enabled 382 * at the same time. 383 */ 384 if (nvbo->zeta) { 385 fpfn = (vram_size / 2) >> PAGE_SHIFT; 386 lpfn = ~0; 387 } else { 388 fpfn = 0; 389 lpfn = (vram_size / 2) >> PAGE_SHIFT; 390 } 391 for (i = 0; i < nvbo->placement.num_placement; ++i) { 392 nvbo->placements[i].fpfn = fpfn; 393 nvbo->placements[i].lpfn = lpfn; 394 } 395 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { 396 nvbo->busy_placements[i].fpfn = fpfn; 397 nvbo->busy_placements[i].lpfn = lpfn; 398 } 399 } 400 } 401 402 void 403 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain, 404 uint32_t busy) 405 { 406 struct ttm_placement *pl = &nvbo->placement; 407 408 pl->placement = nvbo->placements; 409 set_placement_list(nvbo->placements, &pl->num_placement, domain); 410 411 pl->busy_placement = nvbo->busy_placements; 412 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, 413 domain | busy); 414 415 set_placement_range(nvbo, domain); 416 } 417 418 int 419 nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig) 420 { 421 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 422 struct ttm_buffer_object *bo = &nvbo->bo; 423 bool force = false, evict = false; 424 int ret; 425 426 ret = ttm_bo_reserve(bo, false, false, NULL); 427 if (ret) 428 return ret; 429 430 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && 431 domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) { 432 if (!nvbo->contig) { 433 nvbo->contig = true; 434 force = true; 435 evict = true; 436 } 437 } 438 439 if (nvbo->bo.pin_count) { 440 bool error = evict; 441 442 switch (bo->resource->mem_type) { 443 case TTM_PL_VRAM: 444 error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM); 445 break; 446 case TTM_PL_TT: 447 error |= !(domain & NOUVEAU_GEM_DOMAIN_GART); 448 default: 449 break; 450 } 451 452 if (error) { 453 NV_ERROR(drm, "bo %p pinned elsewhere: " 454 "0x%08x vs 0x%08x\n", bo, 455 bo->resource->mem_type, domain); 456 ret = -EBUSY; 457 } 458 ttm_bo_pin(&nvbo->bo); 459 goto out; 460 } 461 462 if (evict) { 463 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0); 464 ret = nouveau_bo_validate(nvbo, false, false); 465 if (ret) 466 goto out; 467 } 468 469 nouveau_bo_placement_set(nvbo, domain, 0); 470 ret = nouveau_bo_validate(nvbo, false, false); 471 if (ret) 472 goto out; 473 474 ttm_bo_pin(&nvbo->bo); 475 476 switch (bo->resource->mem_type) { 477 case TTM_PL_VRAM: 478 drm->gem.vram_available -= bo->base.size; 479 break; 480 case TTM_PL_TT: 481 drm->gem.gart_available -= bo->base.size; 482 break; 483 default: 484 break; 485 } 486 487 out: 488 if (force && ret) 489 nvbo->contig = false; 490 ttm_bo_unreserve(bo); 491 return ret; 492 } 493 494 int 495 nouveau_bo_unpin(struct nouveau_bo *nvbo) 496 { 497 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 498 struct ttm_buffer_object *bo = &nvbo->bo; 499 int ret; 500 501 ret = ttm_bo_reserve(bo, false, false, NULL); 502 if (ret) 503 return ret; 504 505 ttm_bo_unpin(&nvbo->bo); 506 if (!nvbo->bo.pin_count) { 507 switch (bo->resource->mem_type) { 508 case TTM_PL_VRAM: 509 drm->gem.vram_available += bo->base.size; 510 break; 511 case TTM_PL_TT: 512 drm->gem.gart_available += bo->base.size; 513 break; 514 default: 515 break; 516 } 517 } 518 519 ttm_bo_unreserve(bo); 520 return 0; 521 } 522 523 int 524 nouveau_bo_map(struct nouveau_bo *nvbo) 525 { 526 int ret; 527 528 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); 529 if (ret) 530 return ret; 531 532 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap); 533 534 ttm_bo_unreserve(&nvbo->bo); 535 return ret; 536 } 537 538 void 539 nouveau_bo_unmap(struct nouveau_bo *nvbo) 540 { 541 if (!nvbo) 542 return; 543 544 ttm_bo_kunmap(&nvbo->kmap); 545 } 546 547 void 548 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) 549 { 550 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 551 struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; 552 int i, j; 553 554 if (!ttm_dma) 555 return; 556 if (!ttm_dma->pages) { 557 NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma); 558 return; 559 } 560 561 /* Don't waste time looping if the object is coherent */ 562 if (nvbo->force_coherent) 563 return; 564 565 i = 0; 566 while (i < ttm_dma->num_pages) { 567 struct page *p = ttm_dma->pages[i]; 568 size_t num_pages = 1; 569 570 for (j = i + 1; j < ttm_dma->num_pages; ++j) { 571 if (++p != ttm_dma->pages[j]) 572 break; 573 574 ++num_pages; 575 } 576 dma_sync_single_for_device(drm->dev->dev, 577 ttm_dma->dma_address[i], 578 num_pages * PAGE_SIZE, DMA_TO_DEVICE); 579 i += num_pages; 580 } 581 } 582 583 void 584 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) 585 { 586 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 587 struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; 588 int i, j; 589 590 if (!ttm_dma) 591 return; 592 if (!ttm_dma->pages) { 593 NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma); 594 return; 595 } 596 597 /* Don't waste time looping if the object is coherent */ 598 if (nvbo->force_coherent) 599 return; 600 601 i = 0; 602 while (i < ttm_dma->num_pages) { 603 struct page *p = ttm_dma->pages[i]; 604 size_t num_pages = 1; 605 606 for (j = i + 1; j < ttm_dma->num_pages; ++j) { 607 if (++p != ttm_dma->pages[j]) 608 break; 609 610 ++num_pages; 611 } 612 613 dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i], 614 num_pages * PAGE_SIZE, DMA_FROM_DEVICE); 615 i += num_pages; 616 } 617 } 618 619 void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo) 620 { 621 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 622 struct nouveau_bo *nvbo = nouveau_bo(bo); 623 624 mutex_lock(&drm->ttm.io_reserve_mutex); 625 list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru); 626 mutex_unlock(&drm->ttm.io_reserve_mutex); 627 } 628 629 void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo) 630 { 631 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 632 struct nouveau_bo *nvbo = nouveau_bo(bo); 633 634 mutex_lock(&drm->ttm.io_reserve_mutex); 635 list_del_init(&nvbo->io_reserve_lru); 636 mutex_unlock(&drm->ttm.io_reserve_mutex); 637 } 638 639 int 640 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, 641 bool no_wait_gpu) 642 { 643 struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu }; 644 int ret; 645 646 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx); 647 if (ret) 648 return ret; 649 650 nouveau_bo_sync_for_device(nvbo); 651 652 return 0; 653 } 654 655 void 656 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val) 657 { 658 bool is_iomem; 659 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); 660 661 mem += index; 662 663 if (is_iomem) 664 iowrite16_native(val, (void __force __iomem *)mem); 665 else 666 *mem = val; 667 } 668 669 u32 670 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index) 671 { 672 bool is_iomem; 673 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); 674 675 mem += index; 676 677 if (is_iomem) 678 return ioread32_native((void __force __iomem *)mem); 679 else 680 return *mem; 681 } 682 683 void 684 nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val) 685 { 686 bool is_iomem; 687 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); 688 689 mem += index; 690 691 if (is_iomem) 692 iowrite32_native(val, (void __force __iomem *)mem); 693 else 694 *mem = val; 695 } 696 697 static struct ttm_tt * 698 nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) 699 { 700 #if IS_ENABLED(CONFIG_AGP) 701 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 702 703 if (drm->agp.bridge) { 704 return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags); 705 } 706 #endif 707 708 return nouveau_sgdma_create_ttm(bo, page_flags); 709 } 710 711 static int 712 nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm, 713 struct ttm_resource *reg) 714 { 715 #if IS_ENABLED(CONFIG_AGP) 716 struct nouveau_drm *drm = nouveau_bdev(bdev); 717 #endif 718 if (!reg) 719 return -EINVAL; 720 #if IS_ENABLED(CONFIG_AGP) 721 if (drm->agp.bridge) 722 return ttm_agp_bind(ttm, reg); 723 #endif 724 return nouveau_sgdma_bind(bdev, ttm, reg); 725 } 726 727 static void 728 nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm) 729 { 730 #if IS_ENABLED(CONFIG_AGP) 731 struct nouveau_drm *drm = nouveau_bdev(bdev); 732 733 if (drm->agp.bridge) { 734 ttm_agp_unbind(ttm); 735 return; 736 } 737 #endif 738 nouveau_sgdma_unbind(bdev, ttm); 739 } 740 741 static void 742 nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) 743 { 744 struct nouveau_bo *nvbo = nouveau_bo(bo); 745 746 switch (bo->resource->mem_type) { 747 case TTM_PL_VRAM: 748 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 749 NOUVEAU_GEM_DOMAIN_CPU); 750 break; 751 default: 752 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0); 753 break; 754 } 755 756 *pl = nvbo->placement; 757 } 758 759 static int 760 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, 761 struct ttm_resource *reg) 762 { 763 struct nouveau_mem *old_mem = nouveau_mem(bo->resource); 764 struct nouveau_mem *new_mem = nouveau_mem(reg); 765 struct nvif_vmm *vmm = &drm->client.vmm.vmm; 766 int ret; 767 768 ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0, 769 old_mem->mem.size, &old_mem->vma[0]); 770 if (ret) 771 return ret; 772 773 ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0, 774 new_mem->mem.size, &old_mem->vma[1]); 775 if (ret) 776 goto done; 777 778 ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]); 779 if (ret) 780 goto done; 781 782 ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]); 783 done: 784 if (ret) { 785 nvif_vmm_put(vmm, &old_mem->vma[1]); 786 nvif_vmm_put(vmm, &old_mem->vma[0]); 787 } 788 return 0; 789 } 790 791 static int 792 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, 793 struct ttm_operation_ctx *ctx, 794 struct ttm_resource *new_reg) 795 { 796 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 797 struct nouveau_channel *chan = drm->ttm.chan; 798 struct nouveau_cli *cli = (void *)chan->user.client; 799 struct nouveau_fence *fence; 800 int ret; 801 802 /* create temporary vmas for the transfer and attach them to the 803 * old nvkm_mem node, these will get cleaned up after ttm has 804 * destroyed the ttm_resource 805 */ 806 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { 807 ret = nouveau_bo_move_prep(drm, bo, new_reg); 808 if (ret) 809 return ret; 810 } 811 812 if (drm_drv_uses_atomic_modeset(drm->dev)) 813 mutex_lock(&cli->mutex); 814 else 815 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); 816 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible); 817 if (ret == 0) { 818 ret = drm->ttm.move(chan, bo, bo->resource, new_reg); 819 if (ret == 0) { 820 ret = nouveau_fence_new(chan, false, &fence); 821 if (ret == 0) { 822 ret = ttm_bo_move_accel_cleanup(bo, 823 &fence->base, 824 evict, false, 825 new_reg); 826 nouveau_fence_unref(&fence); 827 } 828 } 829 } 830 mutex_unlock(&cli->mutex); 831 return ret; 832 } 833 834 void 835 nouveau_bo_move_init(struct nouveau_drm *drm) 836 { 837 static const struct _method_table { 838 const char *name; 839 int engine; 840 s32 oclass; 841 int (*exec)(struct nouveau_channel *, 842 struct ttm_buffer_object *, 843 struct ttm_resource *, struct ttm_resource *); 844 int (*init)(struct nouveau_channel *, u32 handle); 845 } _methods[] = { 846 { "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init }, 847 { "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init }, 848 { "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init }, 849 { "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init }, 850 { "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init }, 851 { "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init }, 852 { "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init }, 853 { "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init }, 854 { "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init }, 855 { "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init }, 856 { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, 857 { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, 858 { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, 859 { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, 860 { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init }, 861 { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init }, 862 { "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init }, 863 { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init }, 864 { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init }, 865 {}, 866 }; 867 const struct _method_table *mthd = _methods; 868 const char *name = "CPU"; 869 int ret; 870 871 do { 872 struct nouveau_channel *chan; 873 874 if (mthd->engine) 875 chan = drm->cechan; 876 else 877 chan = drm->channel; 878 if (chan == NULL) 879 continue; 880 881 ret = nvif_object_ctor(&chan->user, "ttmBoMove", 882 mthd->oclass | (mthd->engine << 16), 883 mthd->oclass, NULL, 0, 884 &drm->ttm.copy); 885 if (ret == 0) { 886 ret = mthd->init(chan, drm->ttm.copy.handle); 887 if (ret) { 888 nvif_object_dtor(&drm->ttm.copy); 889 continue; 890 } 891 892 drm->ttm.move = mthd->exec; 893 drm->ttm.chan = chan; 894 name = mthd->name; 895 break; 896 } 897 } while ((++mthd)->exec); 898 899 NV_INFO(drm, "MM: using %s for buffer copies\n", name); 900 } 901 902 static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, 903 struct ttm_resource *new_reg) 904 { 905 struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL; 906 struct nouveau_bo *nvbo = nouveau_bo(bo); 907 struct nouveau_vma *vma; 908 909 /* ttm can now (stupidly) pass the driver bos it didn't create... */ 910 if (bo->destroy != nouveau_bo_del_ttm) 911 return; 912 913 nouveau_bo_del_io_reserve_lru(bo); 914 915 if (mem && new_reg->mem_type != TTM_PL_SYSTEM && 916 mem->mem.page == nvbo->page) { 917 list_for_each_entry(vma, &nvbo->vma_list, head) { 918 nouveau_vma_map(vma, mem); 919 } 920 } else { 921 list_for_each_entry(vma, &nvbo->vma_list, head) { 922 WARN_ON(ttm_bo_wait(bo, false, false)); 923 nouveau_vma_unmap(vma); 924 } 925 } 926 927 if (new_reg) 928 nvbo->offset = (new_reg->start << PAGE_SHIFT); 929 930 } 931 932 static int 933 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg, 934 struct nouveau_drm_tile **new_tile) 935 { 936 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 937 struct drm_device *dev = drm->dev; 938 struct nouveau_bo *nvbo = nouveau_bo(bo); 939 u64 offset = new_reg->start << PAGE_SHIFT; 940 941 *new_tile = NULL; 942 if (new_reg->mem_type != TTM_PL_VRAM) 943 return 0; 944 945 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { 946 *new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size, 947 nvbo->mode, nvbo->zeta); 948 } 949 950 return 0; 951 } 952 953 static void 954 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, 955 struct nouveau_drm_tile *new_tile, 956 struct nouveau_drm_tile **old_tile) 957 { 958 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 959 struct drm_device *dev = drm->dev; 960 struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv); 961 962 nv10_bo_put_tile_region(dev, *old_tile, fence); 963 *old_tile = new_tile; 964 } 965 966 static int 967 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, 968 struct ttm_operation_ctx *ctx, 969 struct ttm_resource *new_reg, 970 struct ttm_place *hop) 971 { 972 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 973 struct nouveau_bo *nvbo = nouveau_bo(bo); 974 struct ttm_resource *old_reg = bo->resource; 975 struct nouveau_drm_tile *new_tile = NULL; 976 int ret = 0; 977 978 979 if (new_reg->mem_type == TTM_PL_TT) { 980 ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg); 981 if (ret) 982 return ret; 983 } 984 985 nouveau_bo_move_ntfy(bo, new_reg); 986 ret = ttm_bo_wait_ctx(bo, ctx); 987 if (ret) 988 goto out_ntfy; 989 990 if (nvbo->bo.pin_count) 991 NV_WARN(drm, "Moving pinned object %p!\n", nvbo); 992 993 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { 994 ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile); 995 if (ret) 996 goto out_ntfy; 997 } 998 999 /* Fake bo copy. */ 1000 if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) { 1001 ttm_bo_move_null(bo, new_reg); 1002 goto out; 1003 } 1004 1005 if (old_reg->mem_type == TTM_PL_SYSTEM && 1006 new_reg->mem_type == TTM_PL_TT) { 1007 ttm_bo_move_null(bo, new_reg); 1008 goto out; 1009 } 1010 1011 if (old_reg->mem_type == TTM_PL_TT && 1012 new_reg->mem_type == TTM_PL_SYSTEM) { 1013 nouveau_ttm_tt_unbind(bo->bdev, bo->ttm); 1014 ttm_resource_free(bo, &bo->resource); 1015 ttm_bo_assign_mem(bo, new_reg); 1016 goto out; 1017 } 1018 1019 /* Hardware assisted copy. */ 1020 if (drm->ttm.move) { 1021 if ((old_reg->mem_type == TTM_PL_SYSTEM && 1022 new_reg->mem_type == TTM_PL_VRAM) || 1023 (old_reg->mem_type == TTM_PL_VRAM && 1024 new_reg->mem_type == TTM_PL_SYSTEM)) { 1025 hop->fpfn = 0; 1026 hop->lpfn = 0; 1027 hop->mem_type = TTM_PL_TT; 1028 hop->flags = 0; 1029 return -EMULTIHOP; 1030 } 1031 ret = nouveau_bo_move_m2mf(bo, evict, ctx, 1032 new_reg); 1033 } else 1034 ret = -ENODEV; 1035 1036 if (ret) { 1037 /* Fallback to software copy. */ 1038 ret = ttm_bo_move_memcpy(bo, ctx, new_reg); 1039 } 1040 1041 out: 1042 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { 1043 if (ret) 1044 nouveau_bo_vm_cleanup(bo, NULL, &new_tile); 1045 else 1046 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile); 1047 } 1048 out_ntfy: 1049 if (ret) { 1050 nouveau_bo_move_ntfy(bo, bo->resource); 1051 } 1052 return ret; 1053 } 1054 1055 static void 1056 nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm, 1057 struct ttm_resource *reg) 1058 { 1059 struct nouveau_mem *mem = nouveau_mem(reg); 1060 1061 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { 1062 switch (reg->mem_type) { 1063 case TTM_PL_TT: 1064 if (mem->kind) 1065 nvif_object_unmap_handle(&mem->mem.object); 1066 break; 1067 case TTM_PL_VRAM: 1068 nvif_object_unmap_handle(&mem->mem.object); 1069 break; 1070 default: 1071 break; 1072 } 1073 } 1074 } 1075 1076 static int 1077 nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg) 1078 { 1079 struct nouveau_drm *drm = nouveau_bdev(bdev); 1080 struct nvkm_device *device = nvxx_device(&drm->client.device); 1081 struct nouveau_mem *mem = nouveau_mem(reg); 1082 struct nvif_mmu *mmu = &drm->client.mmu; 1083 int ret; 1084 1085 mutex_lock(&drm->ttm.io_reserve_mutex); 1086 retry: 1087 switch (reg->mem_type) { 1088 case TTM_PL_SYSTEM: 1089 /* System memory */ 1090 ret = 0; 1091 goto out; 1092 case TTM_PL_TT: 1093 #if IS_ENABLED(CONFIG_AGP) 1094 if (drm->agp.bridge) { 1095 reg->bus.offset = (reg->start << PAGE_SHIFT) + 1096 drm->agp.base; 1097 reg->bus.is_iomem = !drm->agp.cma; 1098 reg->bus.caching = ttm_write_combined; 1099 } 1100 #endif 1101 if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || 1102 !mem->kind) { 1103 /* untiled */ 1104 ret = 0; 1105 break; 1106 } 1107 fallthrough; /* tiled memory */ 1108 case TTM_PL_VRAM: 1109 reg->bus.offset = (reg->start << PAGE_SHIFT) + 1110 device->func->resource_addr(device, 1); 1111 reg->bus.is_iomem = true; 1112 1113 /* Some BARs do not support being ioremapped WC */ 1114 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && 1115 mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED) 1116 reg->bus.caching = ttm_uncached; 1117 else 1118 reg->bus.caching = ttm_write_combined; 1119 1120 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { 1121 union { 1122 struct nv50_mem_map_v0 nv50; 1123 struct gf100_mem_map_v0 gf100; 1124 } args; 1125 u64 handle, length; 1126 u32 argc = 0; 1127 1128 switch (mem->mem.object.oclass) { 1129 case NVIF_CLASS_MEM_NV50: 1130 args.nv50.version = 0; 1131 args.nv50.ro = 0; 1132 args.nv50.kind = mem->kind; 1133 args.nv50.comp = mem->comp; 1134 argc = sizeof(args.nv50); 1135 break; 1136 case NVIF_CLASS_MEM_GF100: 1137 args.gf100.version = 0; 1138 args.gf100.ro = 0; 1139 args.gf100.kind = mem->kind; 1140 argc = sizeof(args.gf100); 1141 break; 1142 default: 1143 WARN_ON(1); 1144 break; 1145 } 1146 1147 ret = nvif_object_map_handle(&mem->mem.object, 1148 &args, argc, 1149 &handle, &length); 1150 if (ret != 1) { 1151 if (WARN_ON(ret == 0)) 1152 ret = -EINVAL; 1153 goto out; 1154 } 1155 1156 reg->bus.offset = handle; 1157 } 1158 ret = 0; 1159 break; 1160 default: 1161 ret = -EINVAL; 1162 } 1163 1164 out: 1165 if (ret == -ENOSPC) { 1166 struct nouveau_bo *nvbo; 1167 1168 nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru, 1169 typeof(*nvbo), 1170 io_reserve_lru); 1171 if (nvbo) { 1172 list_del_init(&nvbo->io_reserve_lru); 1173 drm_vma_node_unmap(&nvbo->bo.base.vma_node, 1174 bdev->dev_mapping); 1175 nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource); 1176 goto retry; 1177 } 1178 1179 } 1180 mutex_unlock(&drm->ttm.io_reserve_mutex); 1181 return ret; 1182 } 1183 1184 static void 1185 nouveau_ttm_io_mem_free(struct ttm_device *bdev, struct ttm_resource *reg) 1186 { 1187 struct nouveau_drm *drm = nouveau_bdev(bdev); 1188 1189 mutex_lock(&drm->ttm.io_reserve_mutex); 1190 nouveau_ttm_io_mem_free_locked(drm, reg); 1191 mutex_unlock(&drm->ttm.io_reserve_mutex); 1192 } 1193 1194 vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) 1195 { 1196 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1197 struct nouveau_bo *nvbo = nouveau_bo(bo); 1198 struct nvkm_device *device = nvxx_device(&drm->client.device); 1199 u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT; 1200 int i, ret; 1201 1202 /* as long as the bo isn't in vram, and isn't tiled, we've got 1203 * nothing to do here. 1204 */ 1205 if (bo->resource->mem_type != TTM_PL_VRAM) { 1206 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || 1207 !nvbo->kind) 1208 return 0; 1209 1210 if (bo->resource->mem_type != TTM_PL_SYSTEM) 1211 return 0; 1212 1213 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0); 1214 1215 } else { 1216 /* make sure bo is in mappable vram */ 1217 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA || 1218 bo->resource->start + bo->resource->num_pages < mappable) 1219 return 0; 1220 1221 for (i = 0; i < nvbo->placement.num_placement; ++i) { 1222 nvbo->placements[i].fpfn = 0; 1223 nvbo->placements[i].lpfn = mappable; 1224 } 1225 1226 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { 1227 nvbo->busy_placements[i].fpfn = 0; 1228 nvbo->busy_placements[i].lpfn = mappable; 1229 } 1230 1231 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0); 1232 } 1233 1234 ret = nouveau_bo_validate(nvbo, false, false); 1235 if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS)) 1236 return VM_FAULT_NOPAGE; 1237 else if (unlikely(ret)) 1238 return VM_FAULT_SIGBUS; 1239 1240 ttm_bo_move_to_lru_tail_unlocked(bo); 1241 return 0; 1242 } 1243 1244 static int 1245 nouveau_ttm_tt_populate(struct ttm_device *bdev, 1246 struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) 1247 { 1248 struct ttm_tt *ttm_dma = (void *)ttm; 1249 struct nouveau_drm *drm; 1250 struct device *dev; 1251 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); 1252 1253 if (ttm_tt_is_populated(ttm)) 1254 return 0; 1255 1256 if (slave && ttm->sg) { 1257 drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address, 1258 ttm->num_pages); 1259 return 0; 1260 } 1261 1262 drm = nouveau_bdev(bdev); 1263 dev = drm->dev->dev; 1264 1265 return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx); 1266 } 1267 1268 static void 1269 nouveau_ttm_tt_unpopulate(struct ttm_device *bdev, 1270 struct ttm_tt *ttm) 1271 { 1272 struct nouveau_drm *drm; 1273 struct device *dev; 1274 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); 1275 1276 if (slave) 1277 return; 1278 1279 drm = nouveau_bdev(bdev); 1280 dev = drm->dev->dev; 1281 1282 return ttm_pool_free(&drm->ttm.bdev.pool, ttm); 1283 } 1284 1285 static void 1286 nouveau_ttm_tt_destroy(struct ttm_device *bdev, 1287 struct ttm_tt *ttm) 1288 { 1289 #if IS_ENABLED(CONFIG_AGP) 1290 struct nouveau_drm *drm = nouveau_bdev(bdev); 1291 if (drm->agp.bridge) { 1292 ttm_agp_unbind(ttm); 1293 ttm_tt_destroy_common(bdev, ttm); 1294 ttm_agp_destroy(ttm); 1295 return; 1296 } 1297 #endif 1298 nouveau_sgdma_destroy(bdev, ttm); 1299 } 1300 1301 void 1302 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive) 1303 { 1304 struct dma_resv *resv = nvbo->bo.base.resv; 1305 1306 if (exclusive) 1307 dma_resv_add_excl_fence(resv, &fence->base); 1308 else if (fence) 1309 dma_resv_add_shared_fence(resv, &fence->base); 1310 } 1311 1312 static void 1313 nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo) 1314 { 1315 nouveau_bo_move_ntfy(bo, NULL); 1316 } 1317 1318 struct ttm_device_funcs nouveau_bo_driver = { 1319 .ttm_tt_create = &nouveau_ttm_tt_create, 1320 .ttm_tt_populate = &nouveau_ttm_tt_populate, 1321 .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate, 1322 .ttm_tt_destroy = &nouveau_ttm_tt_destroy, 1323 .eviction_valuable = ttm_bo_eviction_valuable, 1324 .evict_flags = nouveau_bo_evict_flags, 1325 .delete_mem_notify = nouveau_bo_delete_mem_notify, 1326 .move = nouveau_bo_move, 1327 .io_mem_reserve = &nouveau_ttm_io_mem_reserve, 1328 .io_mem_free = &nouveau_ttm_io_mem_free, 1329 }; 1330