/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs <darktama@iinet.net.au>
 *	    Jeremy Kolb <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>

#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

static int nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
			       struct ttm_resource *reg);
static void nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nvkm_fb_tile *tile = &fb->tile.region[i];

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		nvkm_fb_tile_fini(fb, i, tile);

	if (pitch)
		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

	nvkm_fb_tile_prog(fb, i, tile);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct dma_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

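/*
 * Find a tile region for a new tiled buffer: the first idle region is
 * claimed, and every other idle region still carrying a stale pitch is
 * torn down along the way, so the scan doubles as garbage collection
 * for regions whose fences have since signalled.
 */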
static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 zeta)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < fb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && fb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	WARN_ON(nvbo->bo.pin_count > 0);
	nouveau_bo_del_io_reserve_lru(bo);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);

	/*
	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
	 * initialized, so don't attempt to release it.
	 */
	if (bo->base.dev)
		drm_gem_object_release(&bo->base);

	kfree(nvbo);
}

static inline u64
roundup_64(u64 x, u32 y)
{
	x += y - 1;
	do_div(x, y);
	return x * y;
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->client.device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup_64(*size, 32 * nvbo->mode);
			}
		}
	} else {
		*size = roundup_64(*size, (1 << nvbo->page));
		*align = max((1 << nvbo->page), *align);
	}

	*size = roundup_64(*size, PAGE_SIZE);
}

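/*
 * Allocate the driver-side bookkeeping for a buffer object and pick its
 * GPU page size up-front (the loop over vmm->page[] below), since VMM
 * maps must not fail later during migration.  Both *size and *align are
 * rounded up to suit the chosen page size and tiling mode, which is why
 * they are passed by pointer.
 */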
struct nouveau_bo *
nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
		 u32 tile_mode, u32 tile_flags)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	struct nvif_mmu *mmu = &cli->mmu;
	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
	int i, pi = -1;

	if (!*size) {
		NV_WARN(drm, "skipped size %016llx\n", *size);
		return ERR_PTR(-EINVAL);
	}

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->bo.bdev = &drm->ttm.bdev;

	/* This is confusing, and doesn't actually mean we want an uncached
	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
	 * into in nouveau_gem_new().
	 */
	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
		/* Determine if we can get a cache-coherent map, forcing
		 * uncached mapping if we can't.
		 */
		if (!nouveau_drm_use_coherent_gpu_mapping(drm))
			nvbo->force_coherent = true;
	}

	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}

		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
	} else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
		nvbo->comp = (tile_flags & 0x00030000) >> 16;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}
	} else {
		nvbo->zeta = (tile_flags & 0x00000007);
	}
	nvbo->mode = tile_mode;
	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);

	/* Determine the desirable target GPU page size for the buffer. */
	for (i = 0; i < vmm->page_nr; i++) {
		/* Because we cannot currently allow VMM maps to fail
		 * during buffer migration, we need to determine page
		 * size for the buffer up-front, and pre-allocate its
		 * page tables.
		 *
		 * Skip page sizes that can't support needed domains.
		 */
		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
		    (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
			continue;
		if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
			continue;

		/* Select this page size if it's the first that supports
		 * the potential memory domains, or when it's compatible
		 * with the requested compression settings.
		 */
		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
			pi = i;

		/* Stop once the buffer is larger than the current page size. */
		if (*size >= 1ULL << vmm->page[i].shift)
			break;
	}

	if (WARN_ON(pi < 0)) {
		kfree(nvbo);
		return ERR_PTR(-EINVAL);
	}

	/* Disable compression if suitable settings couldn't be found. */
	if (nvbo->comp && !vmm->page[pi].comp) {
		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
			nvbo->kind = mmu->kind[nvbo->kind];
		nvbo->comp = 0;
	}
	nvbo->page = vmm->page[pi].shift;

	nouveau_bo_fixup_align(nvbo, align, size);

	return nvbo;
}

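/*
 * Hand the buffer over to TTM.  Note the split with nouveau_bo_alloc():
 * nouveau_bo_new() below simply chains the two, roughly
 *
 *	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, ...);
 *	ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
 *
 * while other callers (nouveau_gem_new(), presumably) insert GEM-object
 * setup between the two steps, which is why nouveau_bo_del_ttm() must
 * cope with bos that never became GEM objects.
 */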
int
nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
		struct sg_table *sg, struct dma_resv *robj)
{
	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
	int ret;

	nouveau_bo_placement_set(nvbo, domain, 0);
	INIT_LIST_HEAD(&nvbo->io_reserve_lru);

	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
			  &nvbo->placement, align >> PAGE_SHIFT, false, sg,
			  robj, nouveau_bo_del_ttm);
	if (ret) {
		/* TTM calls nouveau_bo_del_ttm() itself on failure, so
		 * there's nothing to clean up here.
		 */
		return ret;
	}

	return 0;
}

int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
	       uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct dma_resv *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
	if (ret)
		return ret;

	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain)
{
	*n = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
		pl[*n].mem_type = TTM_PL_VRAM;
		pl[*n].flags = 0;
		(*n)++;
	}
	if (domain & NOUVEAU_GEM_DOMAIN_GART) {
		pl[*n].mem_type = TTM_PL_TT;
		pl[*n].flags = 0;
		(*n)++;
	}
	if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
		pl[*n].mem_type = TTM_PL_SYSTEM;
		pl[*n].flags = 0;
		(*n)++;
	}
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u64 vram_size = drm->client.device.info.ram_size;
	unsigned i, fpfn, lpfn;

	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    nvbo->bo.base.size < vram_size / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->zeta) {
			fpfn = (vram_size / 2) >> PAGE_SHIFT;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = (vram_size / 2) >> PAGE_SHIFT;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
			 uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement, domain);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   domain | busy);

	set_placement_range(nvbo, domain);
}

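/*
 * Pin a bo into a specific memory domain.  On pre-Tesla chips nothing
 * special happens, but on Tesla+ a VRAM pin may additionally demand
 * physically contiguous memory (for scanout, presumably); in that case
 * the bo is first evicted to GART so that re-validation can reallocate
 * it contiguously.
 */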
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	bool force = false, evict = false;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
		if (!nvbo->contig) {
			nvbo->contig = true;
			force = true;
			evict = true;
		}
	}

	if (nvbo->bo.pin_count) {
		bool error = evict;

		switch (bo->resource->mem_type) {
		case TTM_PL_VRAM:
			error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
			break;
		case TTM_PL_TT:
			error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
			break;
		default:
			break;
		}

		if (error) {
			NV_ERROR(drm, "bo %p pinned elsewhere: "
				      "0x%08x vs 0x%08x\n", bo,
				 bo->resource->mem_type, domain);
			ret = -EBUSY;
		}
		ttm_bo_pin(&nvbo->bo);
		goto out;
	}

	if (evict) {
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
		ret = nouveau_bo_validate(nvbo, false, false);
		if (ret)
			goto out;
	}

	nouveau_bo_placement_set(nvbo, domain, 0);
	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret)
		goto out;

	ttm_bo_pin(&nvbo->bo);

	switch (bo->resource->mem_type) {
	case TTM_PL_VRAM:
		drm->gem.vram_available -= bo->base.size;
		break;
	case TTM_PL_TT:
		drm->gem.gart_available -= bo->base.size;
		break;
	default:
		break;
	}

out:
	if (force && ret)
		nvbo->contig = false;
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	ttm_bo_unpin(&nvbo->bo);
	if (!nvbo->bo.pin_count) {
		switch (bo->resource->mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->base.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->base.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return 0;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap);

	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (!nvbo)
		return;

	ttm_bo_kunmap(&nvbo->kmap);
}

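/*
 * The two helpers below keep the CPU and device views of a bo coherent
 * on non-coherent platforms by running the streaming DMA API over the
 * bo's backing pages.  Physically contiguous page runs are coalesced
 * first, so each dma_sync_single_for_*() call covers as large a span as
 * possible instead of being issued once per page.
 */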
void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
	int i, j;

	if (!ttm_dma || !ttm_dma->dma_address)
		return;
	if (!ttm_dma->pages) {
		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
		return;
	}

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	i = 0;
	while (i < ttm_dma->num_pages) {
		struct page *p = ttm_dma->pages[i];
		size_t num_pages = 1;

		for (j = i + 1; j < ttm_dma->num_pages; ++j) {
			if (++p != ttm_dma->pages[j])
				break;

			++num_pages;
		}
		dma_sync_single_for_device(drm->dev->dev,
					   ttm_dma->dma_address[i],
					   num_pages * PAGE_SIZE, DMA_TO_DEVICE);
		i += num_pages;
	}
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
	int i, j;

	if (!ttm_dma || !ttm_dma->dma_address)
		return;
	if (!ttm_dma->pages) {
		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
		return;
	}

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	i = 0;
	while (i < ttm_dma->num_pages) {
		struct page *p = ttm_dma->pages[i];
		size_t num_pages = 1;

		for (j = i + 1; j < ttm_dma->num_pages; ++j) {
			if (++p != ttm_dma->pages[j])
				break;

			++num_pages;
		}

		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
					num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
		i += num_pages;
	}
}

void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_del_init(&nvbo->io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
	if (ret)
		return ret;

	nouveau_bo_sync_for_device(nvbo);

	return 0;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);

	if (drm->agp.bridge) {
		return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
	}
#endif

	return nouveau_sgdma_create_ttm(bo, page_flags);
}

static int
nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
		    struct ttm_resource *reg)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
#endif
	if (!reg)
		return -EINVAL;
#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge)
		return ttm_agp_bind(ttm, reg);
#endif
	return nouveau_sgdma_bind(bdev, ttm, reg);
}

static void
nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	if (drm->agp.bridge) {
		ttm_agp_unbind(ttm);
		return;
	}
#endif
	nouveau_sgdma_unbind(bdev, ttm);
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->resource->mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
					 NOUVEAU_GEM_DOMAIN_CPU);
		break;
	default:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
		break;
	}

	*pl = nvbo->placement;
}

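/*
 * Prepare a GPU-assisted move on Tesla and newer: map both the old and
 * the new backing store into temporary virtual address ranges the copy
 * engine can read from and write to.  Both windows are hung off
 * old_mem->vma[], since it's the old node that survives until TTM
 * destroys the resource; see the comment in nouveau_bo_move_m2mf().
 */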
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_resource *reg)
{
	struct nouveau_mem *old_mem = nouveau_mem(bo->resource);
	struct nouveau_mem *new_mem = nouveau_mem(reg);
	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
	int ret;

	ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
			   old_mem->mem.size, &old_mem->vma[0]);
	if (ret)
		return ret;

	ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
			   new_mem->mem.size, &old_mem->vma[1]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
done:
	if (ret) {
		nvif_vmm_put(vmm, &old_mem->vma[1]);
		nvif_vmm_put(vmm, &old_mem->vma[0]);
	}
	return ret;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
		     struct ttm_operation_ctx *ctx,
		     struct ttm_resource *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nvkm_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_resource
	 */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_reg);
		if (ret)
			return ret;
	}

	if (drm_drv_uses_atomic_modeset(drm->dev))
		mutex_lock(&cli->mutex);
	else
		mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict, false,
								new_reg);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}

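/*
 * Probe for a hardware copy mechanism, newest class first.  Entries
 * with a non-zero engine number run on the dedicated copy-engine
 * channel (drm->cechan), the rest on the general channel; the 0xa0b5
 * through 0xc5b5 classes appear to be Kepler-and-newer copy engines,
 * with crypt and M2MF methods as fallbacks for older hardware.  If
 * nothing binds, the name stays "CPU" and moves fall back to
 * ttm_bo_move_memcpy() in nouveau_bo_move().
 */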
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct _method_table {
		const char *name;
		int engine;
		s32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_resource *, struct ttm_resource *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
	};
	const struct _method_table *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_ctor(&chan->user, "ttmBoMove",
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_dtor(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
				 struct ttm_resource *new_reg)
{
	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	nouveau_bo_del_io_reserve_lru(bo);

	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
	    mem->mem.page == nvbo->page) {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			nouveau_vma_map(vma, mem);
		}
	} else {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			WARN_ON(ttm_bo_wait(bo, false, false));
			nouveau_vma_unmap(vma);
		}
	}

	if (new_reg)
		nvbo->offset = (new_reg->start << PAGE_SHIFT);
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_reg->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_reg->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size,
					       nvbo->mode, nvbo->zeta);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}

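/*
 * The main TTM move callback.  System<->GART transitions are handled
 * inline (bind/unbind only, no data copy); direct System<->VRAM moves
 * are bounced through GART by returning -EMULTIHOP with *hop filled in,
 * so TTM retries the move in two stages; everything else goes through
 * the hardware copy engine found at init time, with a CPU memcpy as the
 * last resort.
 */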
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
		struct ttm_operation_ctx *ctx,
		struct ttm_resource *new_reg,
		struct ttm_place *hop)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_resource *old_reg = bo->resource;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (new_reg->mem_type == TTM_PL_TT) {
		ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
		if (ret)
			return ret;
	}

	nouveau_bo_move_ntfy(bo, new_reg);
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		goto out_ntfy;

	if (nvbo->bo.pin_count)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
		if (ret)
			goto out_ntfy;
	}

	/* Fake bo copy. */
	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		ttm_bo_move_null(bo, new_reg);
		goto out;
	}

	if (old_reg->mem_type == TTM_PL_SYSTEM &&
	    new_reg->mem_type == TTM_PL_TT) {
		ttm_bo_move_null(bo, new_reg);
		goto out;
	}

	if (old_reg->mem_type == TTM_PL_TT &&
	    new_reg->mem_type == TTM_PL_SYSTEM) {
		nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_reg);
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if ((old_reg->mem_type == TTM_PL_SYSTEM &&
		     new_reg->mem_type == TTM_PL_VRAM) ||
		    (old_reg->mem_type == TTM_PL_VRAM &&
		     new_reg->mem_type == TTM_PL_SYSTEM)) {
			hop->fpfn = 0;
			hop->lpfn = 0;
			hop->mem_type = TTM_PL_TT;
			hop->flags = 0;
			return -EMULTIHOP;
		}
		ret = nouveau_bo_move_m2mf(bo, evict, ctx,
					   new_reg);
	} else
		ret = -ENODEV;

	if (ret) {
		/* Fallback to software copy. */
		ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
	}

out:
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}
out_ntfy:
	if (ret) {
		nouveau_bo_move_ntfy(bo, bo->resource);
	}
	return ret;
}

static void
nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
			       struct ttm_resource *reg)
{
	struct nouveau_mem *mem = nouveau_mem(reg);

	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
		switch (reg->mem_type) {
		case TTM_PL_TT:
			if (mem->kind)
				nvif_object_unmap_handle(&mem->mem.object);
			break;
		case TTM_PL_VRAM:
			nvif_object_unmap_handle(&mem->mem.object);
			break;
		default:
			break;
		}
	}
}

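/*
 * Make a resource CPU-accessible through BAR1.  On NV50+ this may
 * require the kernel to map the object behind a handle first; running
 * out of BAR space yields -ENOSPC, in which case the least-recently
 * faulted bo on the io_reserve LRU is unmapped to make room and the
 * whole switch is retried.
 */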
static int
nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nouveau_mem *mem = nouveau_mem(reg);
	struct nvif_mmu *mmu = &drm->client.mmu;
	int ret;

	mutex_lock(&drm->ttm.io_reserve_mutex);
retry:
	switch (reg->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		ret = 0;
		goto out;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (drm->agp.bridge) {
			reg->bus.offset = (reg->start << PAGE_SHIFT) +
				drm->agp.base;
			reg->bus.is_iomem = !drm->agp.cma;
			reg->bus.caching = ttm_write_combined;
		}
#endif
		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
		    !mem->kind) {
			/* untiled */
			ret = 0;
			break;
		}
		fallthrough;	/* tiled memory */
	case TTM_PL_VRAM:
		reg->bus.offset = (reg->start << PAGE_SHIFT) +
			device->func->resource_addr(device, 1);
		reg->bus.is_iomem = true;

		/* Some BARs do not support being ioremapped WC */
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
		    mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED)
			reg->bus.caching = ttm_uncached;
		else
			reg->bus.caching = ttm_write_combined;

		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
			union {
				struct nv50_mem_map_v0 nv50;
				struct gf100_mem_map_v0 gf100;
			} args;
			u64 handle, length;
			u32 argc = 0;

			switch (mem->mem.object.oclass) {
			case NVIF_CLASS_MEM_NV50:
				args.nv50.version = 0;
				args.nv50.ro = 0;
				args.nv50.kind = mem->kind;
				args.nv50.comp = mem->comp;
				argc = sizeof(args.nv50);
				break;
			case NVIF_CLASS_MEM_GF100:
				args.gf100.version = 0;
				args.gf100.ro = 0;
				args.gf100.kind = mem->kind;
				argc = sizeof(args.gf100);
				break;
			default:
				WARN_ON(1);
				break;
			}

			ret = nvif_object_map_handle(&mem->mem.object,
						     &args, argc,
						     &handle, &length);
			if (ret != 1) {
				if (WARN_ON(ret == 0))
					ret = -EINVAL;
				goto out;
			}

			reg->bus.offset = handle;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

out:
	if (ret == -ENOSPC) {
		struct nouveau_bo *nvbo;

		nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
						typeof(*nvbo),
						io_reserve_lru);
		if (nvbo) {
			list_del_init(&nvbo->io_reserve_lru);
			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
					   bdev->dev_mapping);
			nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
			goto retry;
		}
	}
	mutex_unlock(&drm->ttm.io_reserve_mutex);
	return ret;
}

static void
nouveau_ttm_io_mem_free(struct ttm_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	nouveau_ttm_io_mem_free_locked(drm, reg);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

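/*
 * Called on a CPU page fault before the fault is satisfied.  Tiled bos
 * sitting in system memory are migrated to GART so their kind
 * attributes apply, and VRAM bos on pre-Tesla chips are squeezed into
 * the BAR1-mappable slice of VRAM, presumably because only NV50+ can
 * remap BAR1 through its own page tables.
 */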
vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->resource->mem_type != TTM_PL_VRAM) {
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nvbo->kind)
			return 0;

		if (bo->resource->mem_type != TTM_PL_SYSTEM)
			return 0;

		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);

	} else {
		/* make sure bo is in mappable vram */
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
		    bo->resource->start + bo->resource->num_pages < mappable)
			return 0;

		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = 0;
			nvbo->placements[i].lpfn = mappable;
		}

		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = 0;
			nvbo->busy_placements[i].lpfn = mappable;
		}

		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
	}

	ret = nouveau_bo_validate(nvbo, false, false);
	if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(ret))
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

static int
nouveau_ttm_tt_populate(struct ttm_device *bdev,
			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct device *dev;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (slave && ttm->sg) {
		drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address,
					       ttm->num_pages);
		return 0;
	}

	drm = nouveau_bdev(bdev);
	dev = drm->dev->dev;

	return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx);
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
			  struct ttm_tt *ttm)
{
	struct nouveau_drm *drm;
	struct device *dev;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(bdev);
	dev = drm->dev->dev;

	return ttm_pool_free(&drm->ttm.bdev.pool, ttm);
}

static void
nouveau_ttm_tt_destroy(struct ttm_device *bdev,
		       struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	if (drm->agp.bridge) {
		ttm_agp_unbind(ttm);
		ttm_tt_destroy_common(bdev, ttm);
		ttm_agp_destroy(ttm);
		return;
	}
#endif
	nouveau_sgdma_destroy(bdev, ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct dma_resv *resv = nvbo->bo.base.resv;

	if (exclusive)
		dma_resv_add_excl_fence(resv, &fence->base);
	else if (fence)
		dma_resv_add_shared_fence(resv, &fence->base);
}

static void
nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	nouveau_bo_move_ntfy(bo, NULL);
}

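/*
 * TTM entry points for all nouveau buffer objects.  Everything TTM does
 * to a bo (backing-page allocation, eviction, moves, CPU access through
 * BAR1) funnels through this vtable.
 */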
struct ttm_device_funcs nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.ttm_tt_destroy = &nouveau_ttm_tt_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = nouveau_bo_evict_flags,
	.delete_mem_notify = nouveau_bo_delete_mem_notify,
	.move = nouveau_bo_move,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};