Lines Matching +full:tile +full:- +full:cache
30 #include <linux/dma-mapping.h>
52 * NV10-NV40 tiling helpers
60 int i = reg - drm->tile.reg; in nv10_bo_update_tile_region()
61 struct nvkm_fb *fb = nvxx_fb(&drm->client.device); in nv10_bo_update_tile_region()
62 struct nvkm_fb_tile *tile = &fb->tile.region[i]; in nv10_bo_update_tile_region() local
64 nouveau_fence_unref(&reg->fence); in nv10_bo_update_tile_region()
66 if (tile->pitch) in nv10_bo_update_tile_region()
67 nvkm_fb_tile_fini(fb, i, tile); in nv10_bo_update_tile_region()
70 nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile); in nv10_bo_update_tile_region()
72 nvkm_fb_tile_prog(fb, i, tile); in nv10_bo_update_tile_region()
79 struct nouveau_drm_tile *tile = &drm->tile.reg[i]; in nv10_bo_get_tile_region() local
81 spin_lock(&drm->tile.lock); in nv10_bo_get_tile_region()
83 if (!tile->used && in nv10_bo_get_tile_region()
84 (!tile->fence || nouveau_fence_done(tile->fence))) in nv10_bo_get_tile_region()
85 tile->used = true; in nv10_bo_get_tile_region()
87 tile = NULL; in nv10_bo_get_tile_region()
89 spin_unlock(&drm->tile.lock); in nv10_bo_get_tile_region()
90 return tile; in nv10_bo_get_tile_region()
94 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile, in nv10_bo_put_tile_region() argument
99 if (tile) { in nv10_bo_put_tile_region()
100 spin_lock(&drm->tile.lock); in nv10_bo_put_tile_region()
101 tile->fence = (struct nouveau_fence *)dma_fence_get(fence); in nv10_bo_put_tile_region()
102 tile->used = false; in nv10_bo_put_tile_region()
103 spin_unlock(&drm->tile.lock); in nv10_bo_put_tile_region()
112 struct nvkm_fb *fb = nvxx_fb(&drm->client.device); in nv10_bo_set_tiling()
113 struct nouveau_drm_tile *tile, *found = NULL; in nv10_bo_set_tiling() local
116 for (i = 0; i < fb->tile.regions; i++) { in nv10_bo_set_tiling()
117 tile = nv10_bo_get_tile_region(dev, i); in nv10_bo_set_tiling()
120 found = tile; in nv10_bo_set_tiling()
123 } else if (tile && fb->tile.region[i].pitch) { in nv10_bo_set_tiling()
124 /* Kill an unused tile region. */ in nv10_bo_set_tiling()
125 nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0); in nv10_bo_set_tiling()
128 nv10_bo_put_tile_region(dev, tile, NULL); in nv10_bo_set_tiling()
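The NV10-NV40 helpers above treat drm->tile.reg[] as a fixed pool of hardware tile regions: a slot may be claimed only when it is not in use and its last fence has signalled, and releasing a slot just records the caller's fence and clears the used flag under drm->tile.lock. A minimal userspace sketch of that claim/release pattern, with a plain boolean standing in for nouveau_fence_done() and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NR_TILE_REGIONS 8

struct tile_slot {
	bool used;              /* claimed by a buffer object */
	bool fence_signalled;   /* stand-in for nouveau_fence_done(tile->fence) */
};

static struct tile_slot slots[NR_TILE_REGIONS];
static pthread_mutex_t tile_lock = PTHREAD_MUTEX_INITIALIZER;

/* Claim slot i if it is idle and its last fence has signalled. */
static struct tile_slot *tile_get(int i)
{
	struct tile_slot *t = &slots[i];

	pthread_mutex_lock(&tile_lock);
	if (!t->used && t->fence_signalled)
		t->used = true;
	else
		t = NULL;
	pthread_mutex_unlock(&tile_lock);
	return t;
}

/* Release a slot: record the fence state and mark it reusable. */
static void tile_put(struct tile_slot *t, bool fence_signalled)
{
	if (!t)
		return;
	pthread_mutex_lock(&tile_lock);
	t->fence_signalled = fence_signalled;
	t->used = false;
	pthread_mutex_unlock(&tile_lock);
}

int main(void)
{
	for (int i = 0; i < NR_TILE_REGIONS; i++)
		slots[i].fence_signalled = true;   /* all slots idle at start */

	struct tile_slot *t = tile_get(0);
	printf("got slot 0: %s\n", t ? "yes" : "no");
	tile_put(t, true);
	return 0;
}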
139 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_del_ttm()
140 struct drm_device *dev = drm->dev; in nouveau_bo_del_ttm()
143 WARN_ON(nvbo->bo.pin_count > 0); in nouveau_bo_del_ttm()
145 nv10_bo_put_tile_region(dev, nvbo->tile, NULL); in nouveau_bo_del_ttm()
151 if (bo->base.dev) in nouveau_bo_del_ttm()
152 drm_gem_object_release(&bo->base); in nouveau_bo_del_ttm()
154 dma_resv_fini(&bo->base._resv); in nouveau_bo_del_ttm()
162 x += y - 1; in roundup_64()
170 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_fixup_align()
171 struct nvif_device *device = &drm->client.device; in nouveau_bo_fixup_align()
173 if (device->info.family < NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_fixup_align()
174 if (nvbo->mode) { in nouveau_bo_fixup_align()
175 if (device->info.chipset >= 0x40) { in nouveau_bo_fixup_align()
177 *size = roundup_64(*size, 64 * nvbo->mode); in nouveau_bo_fixup_align()
179 } else if (device->info.chipset >= 0x30) { in nouveau_bo_fixup_align()
181 *size = roundup_64(*size, 64 * nvbo->mode); in nouveau_bo_fixup_align()
183 } else if (device->info.chipset >= 0x20) { in nouveau_bo_fixup_align()
185 *size = roundup_64(*size, 64 * nvbo->mode); in nouveau_bo_fixup_align()
187 } else if (device->info.chipset >= 0x10) { in nouveau_bo_fixup_align()
189 *size = roundup_64(*size, 32 * nvbo->mode); in nouveau_bo_fixup_align()
193 *size = roundup_64(*size, (1 << nvbo->page)); in nouveau_bo_fixup_align()
194 *align = max((1 << nvbo->page), *align); in nouveau_bo_fixup_align()
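nouveau_bo_fixup_align() rounds the requested size up to a tiling-dependent granularity on pre-Tesla chips (a multiple of 64, or 32 on NV1x, times the tile pitch in nvbo->mode) and otherwise to the chosen GPU page size, raising *align to at least one page. A small sketch of the roundup_64() arithmetic used in both cases; the pitch and page_shift values below are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Round x up to the next multiple of y (y need not be a power of two).
 * The kernel helper uses do_div(); plain % is fine in userspace. */
static uint64_t roundup_64(uint64_t x, uint32_t y)
{
	x += y - 1;
	return x - (x % y);
}

int main(void)
{
	uint64_t size = 100000;
	uint32_t pitch = 256;          /* hypothetical tile pitch (nvbo->mode) */
	unsigned page_shift = 12;      /* hypothetical nvbo->page */

	/* Pre-Tesla, tiled: round to 64 (or 32 on NV1x) rows of the pitch. */
	uint64_t tiled = roundup_64(size, 64 * pitch);

	/* Otherwise: round to the selected GPU page size. */
	uint64_t paged = roundup_64(size, 1u << page_shift);

	printf("tiled=%llu paged=%llu\n",
	       (unsigned long long)tiled, (unsigned long long)paged);
	return 0;
}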
204 struct nouveau_drm *drm = cli->drm; in nouveau_bo_alloc()
206 struct nvif_mmu *mmu = &cli->mmu; in nouveau_bo_alloc()
207 struct nvif_vmm *vmm = &nouveau_cli_vmm(cli)->vmm; in nouveau_bo_alloc()
208 int i, pi = -1; in nouveau_bo_alloc()
212 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
217 return ERR_PTR(-ENOMEM); in nouveau_bo_alloc()
219 INIT_LIST_HEAD(&nvbo->head); in nouveau_bo_alloc()
220 INIT_LIST_HEAD(&nvbo->entry); in nouveau_bo_alloc()
221 INIT_LIST_HEAD(&nvbo->vma_list); in nouveau_bo_alloc()
222 nvbo->bo.bdev = &drm->ttm.bdev; in nouveau_bo_alloc()
229 /* Determine if we can get a cache-coherent map, forcing in nouveau_bo_alloc()
233 nvbo->force_coherent = true; in nouveau_bo_alloc()
236 nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG); in nouveau_bo_alloc()
238 if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) { in nouveau_bo_alloc()
239 nvbo->kind = (tile_flags & 0x0000ff00) >> 8; in nouveau_bo_alloc()
240 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { in nouveau_bo_alloc()
242 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
245 nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind; in nouveau_bo_alloc()
246 } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_alloc()
247 nvbo->kind = (tile_flags & 0x00007f00) >> 8; in nouveau_bo_alloc()
248 nvbo->comp = (tile_flags & 0x00030000) >> 16; in nouveau_bo_alloc()
249 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { in nouveau_bo_alloc()
251 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
254 nvbo->zeta = (tile_flags & 0x00000007); in nouveau_bo_alloc()
256 nvbo->mode = tile_mode; in nouveau_bo_alloc()
260 for (i = 0; i < vmm->page_nr; i++) { in nouveau_bo_alloc()
263 * size for the buffer up-front, and pre-allocate its in nouveau_bo_alloc()
268 if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE && in nouveau_bo_alloc()
269 (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram) in nouveau_bo_alloc()
272 (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT)) in nouveau_bo_alloc()
279 if (pi < 0 || !nvbo->comp || vmm->page[i].comp) in nouveau_bo_alloc()
283 if (*size >= 1ULL << vmm->page[i].shift) in nouveau_bo_alloc()
289 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
293 if (nvbo->comp && !vmm->page[pi].comp) { in nouveau_bo_alloc()
294 if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100) in nouveau_bo_alloc()
295 nvbo->kind = mmu->kind[nvbo->kind]; in nouveau_bo_alloc()
296 nvbo->comp = 0; in nouveau_bo_alloc()
298 nvbo->page = vmm->page[pi].shift; in nouveau_bo_alloc()
301 for (i = 0; i < vmm->page_nr; i++) { in nouveau_bo_alloc()
304 * size for the buffer up-front, and pre-allocate its in nouveau_bo_alloc()
309 if ((domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram) in nouveau_bo_alloc()
312 (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT)) in nouveau_bo_alloc()
319 if (*size >= 1ULL << vmm->page[i].shift) in nouveau_bo_alloc()
324 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
326 nvbo->page = vmm->page[pi].shift; in nouveau_bo_alloc()
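Both loops in nouveau_bo_alloc() scan vmm->page[] (largest page size first), skip sizes the target placement cannot back or host pages larger than PAGE_SHIFT, and keep the last acceptable index as a fallback while stopping at the first size the buffer can fill. A compact sketch of that selection, assuming a hypothetical descending page_mode table:

#include <stdint.h>
#include <stdio.h>

struct page_mode {
	uint8_t shift;   /* page size is 1 << shift */
	int vram;        /* usable for VRAM-backed buffers? */
	int host;        /* usable for system-memory-backed buffers? */
};

/* Pick the largest page size that the placement supports and that does
 * not exceed the buffer size; fall back to the smallest supported one. */
static int pick_page(const struct page_mode *p, int nr, int want_vram,
		     uint64_t size)
{
	int pi = -1;

	for (int i = 0; i < nr; i++) {          /* table is largest-first */
		if (want_vram && !p[i].vram)
			continue;
		if (!want_vram && !p[i].host)
			continue;
		pi = i;                          /* remember as fallback */
		if (size >= (1ULL << p[i].shift))
			break;                   /* buffer is big enough: use it */
	}
	return pi;
}

int main(void)
{
	const struct page_mode modes[] = {
		{ 16, 1, 0 },   /* 64 KiB, VRAM only (hypothetical) */
		{ 12, 1, 1 },   /* 4 KiB */
	};
	int pi = pick_page(modes, 2, 1, 1 << 20);   /* 1 MiB VRAM buffer */
	printf("chosen shift: %d\n", pi < 0 ? -1 : modes[pi].shift);
	return 0;
}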
347 INIT_LIST_HEAD(&nvbo->io_reserve_lru); in nouveau_bo_init()
349 ret = ttm_bo_init_reserved(nvbo->bo.bdev, &nvbo->bo, type, in nouveau_bo_init()
350 &nvbo->placement, align >> PAGE_SHIFT, &ctx, in nouveau_bo_init()
358 ttm_bo_unreserve(&nvbo->bo); in nouveau_bo_init()
377 nvbo->bo.base.size = size; in nouveau_bo_new()
378 dma_resv_init(&nvbo->bo.base._resv); in nouveau_bo_new()
379 drm_vma_node_reset(&nvbo->bo.base.vma_node); in nouveau_bo_new()
384 drm_gem_gpuva_init(&nvbo->bo.base); in nouveau_bo_new()
418 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in set_placement_range()
419 u64 vram_size = drm->client.device.info.ram_size; in set_placement_range()
422 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && in set_placement_range()
423 nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) && in set_placement_range()
424 nvbo->bo.base.size < vram_size / 4) { in set_placement_range()
428 * speed up when alpha-blending and depth-test are enabled in set_placement_range()
431 if (nvbo->zeta) { in set_placement_range()
438 for (i = 0; i < nvbo->placement.num_placement; ++i) { in set_placement_range()
439 nvbo->placements[i].fpfn = fpfn; in set_placement_range()
440 nvbo->placements[i].lpfn = lpfn; in set_placement_range()
442 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { in set_placement_range()
443 nvbo->busy_placements[i].fpfn = fpfn; in set_placement_range()
444 nvbo->busy_placements[i].lpfn = lpfn; in set_placement_range()
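set_placement_range() constrains small tiled buffers on NV10-class (Celsius) hardware so colour and depth buffers land in different halves of VRAM, by rewriting fpfn/lpfn on every placement. A sketch of the half-split under the assumption, taken from the surrounding code, that zeta (depth) buffers go to the upper half and colour buffers to the lower half:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Split VRAM into halves: depth (zeta) buffers in the upper half,
 * colour buffers in the lower half, expressed as page-frame bounds. */
static void placement_range(uint64_t vram_size, int zeta,
			    uint32_t *fpfn, uint32_t *lpfn)
{
	if (zeta) {
		*fpfn = (vram_size / 2) >> PAGE_SHIFT;
		*lpfn = ~0u;
	} else {
		*fpfn = 0;
		*lpfn = (vram_size / 2) >> PAGE_SHIFT;
	}
}

int main(void)
{
	uint32_t fpfn, lpfn;

	placement_range(64ull << 20, 1, &fpfn, &lpfn);   /* 64 MiB card, zeta */
	printf("zeta:   fpfn=%u lpfn=%u\n", fpfn, lpfn);

	placement_range(64ull << 20, 0, &fpfn, &lpfn);   /* colour buffer */
	printf("colour: fpfn=%u lpfn=%u\n", fpfn, lpfn);
	return 0;
}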
453 struct ttm_placement *pl = &nvbo->placement; in nouveau_bo_placement_set()
455 pl->placement = nvbo->placements; in nouveau_bo_placement_set()
456 set_placement_list(nvbo->placements, &pl->num_placement, domain); in nouveau_bo_placement_set()
458 pl->busy_placement = nvbo->busy_placements; in nouveau_bo_placement_set()
459 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, in nouveau_bo_placement_set()
468 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_pin()
469 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_bo_pin()
477 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && in nouveau_bo_pin()
479 if (!nvbo->contig) { in nouveau_bo_pin()
480 nvbo->contig = true; in nouveau_bo_pin()
486 if (nvbo->bo.pin_count) { in nouveau_bo_pin()
489 switch (bo->resource->mem_type) { in nouveau_bo_pin()
503 bo->resource->mem_type, domain); in nouveau_bo_pin()
504 ret = -EBUSY; in nouveau_bo_pin()
506 ttm_bo_pin(&nvbo->bo); in nouveau_bo_pin()
522 ttm_bo_pin(&nvbo->bo); in nouveau_bo_pin()
524 switch (bo->resource->mem_type) { in nouveau_bo_pin()
526 drm->gem.vram_available -= bo->base.size; in nouveau_bo_pin()
529 drm->gem.gart_available -= bo->base.size; in nouveau_bo_pin()
537 nvbo->contig = false; in nouveau_bo_pin()
545 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_unpin()
546 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_bo_unpin()
553 ttm_bo_unpin(&nvbo->bo); in nouveau_bo_unpin()
554 if (!nvbo->bo.pin_count) { in nouveau_bo_unpin()
555 switch (bo->resource->mem_type) { in nouveau_bo_unpin()
557 drm->gem.vram_available += bo->base.size; in nouveau_bo_unpin()
560 drm->gem.gart_available += bo->base.size; in nouveau_bo_unpin()
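nouveau_bo_pin() and nouveau_bo_unpin() keep drm->gem.vram_available / gart_available in step with the pin count: the first pin subtracts the object size from the counter for the memory type it currently occupies, and the last unpin adds it back. A trivial accounting sketch of that bookkeeping:

#include <stdint.h>
#include <stdio.h>

enum mem_type { MEM_VRAM, MEM_GART };

struct accounting {
	int64_t vram_available;
	int64_t gart_available;
};

static void account_pin(struct accounting *a, enum mem_type t, int64_t size)
{
	if (t == MEM_VRAM)
		a->vram_available -= size;
	else
		a->gart_available -= size;
}

static void account_unpin(struct accounting *a, enum mem_type t, int64_t size)
{
	if (t == MEM_VRAM)
		a->vram_available += size;
	else
		a->gart_available += size;
}

int main(void)
{
	struct accounting a = { 256 << 20, 512 << 20 };

	account_pin(&a, MEM_VRAM, 16 << 20);    /* pin a 16 MiB scanout buffer */
	printf("vram available: %lld MiB\n", (long long)(a.vram_available >> 20));
	account_unpin(&a, MEM_VRAM, 16 << 20);
	printf("vram available: %lld MiB\n", (long long)(a.vram_available >> 20));
	return 0;
}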
576 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); in nouveau_bo_map()
580 ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size), &nvbo->kmap); in nouveau_bo_map()
582 ttm_bo_unreserve(&nvbo->bo); in nouveau_bo_map()
592 ttm_bo_kunmap(&nvbo->kmap); in nouveau_bo_unmap()
598 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_sync_for_device()
599 struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; in nouveau_bo_sync_for_device()
602 if (!ttm_dma || !ttm_dma->dma_address) in nouveau_bo_sync_for_device()
604 if (!ttm_dma->pages) { in nouveau_bo_sync_for_device()
610 if (nvbo->force_coherent) in nouveau_bo_sync_for_device()
614 while (i < ttm_dma->num_pages) { in nouveau_bo_sync_for_device()
615 struct page *p = ttm_dma->pages[i]; in nouveau_bo_sync_for_device()
618 for (j = i + 1; j < ttm_dma->num_pages; ++j) { in nouveau_bo_sync_for_device()
619 if (++p != ttm_dma->pages[j]) in nouveau_bo_sync_for_device()
624 dma_sync_single_for_device(drm->dev->dev, in nouveau_bo_sync_for_device()
625 ttm_dma->dma_address[i], in nouveau_bo_sync_for_device()
634 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_sync_for_cpu()
635 struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; in nouveau_bo_sync_for_cpu()
638 if (!ttm_dma || !ttm_dma->dma_address) in nouveau_bo_sync_for_cpu()
640 if (!ttm_dma->pages) { in nouveau_bo_sync_for_cpu()
646 if (nvbo->force_coherent) in nouveau_bo_sync_for_cpu()
650 while (i < ttm_dma->num_pages) { in nouveau_bo_sync_for_cpu()
651 struct page *p = ttm_dma->pages[i]; in nouveau_bo_sync_for_cpu()
654 for (j = i + 1; j < ttm_dma->num_pages; ++j) { in nouveau_bo_sync_for_cpu()
655 if (++p != ttm_dma->pages[j]) in nouveau_bo_sync_for_cpu()
661 dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i], in nouveau_bo_sync_for_cpu()
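Both sync helpers skip coherent buffers and otherwise walk ttm_dma->pages[], merging runs of physically consecutive pages so that a single dma_sync_single_for_device()/..._for_cpu() call covers each run rather than one call per page. A userspace model of the same run-coalescing loop over an array of page-frame numbers:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* Coalesce runs of consecutive page-frame numbers and "sync" each run
 * with a single call, mirroring the loop in the sync helpers above. */
static void sync_ranges(const unsigned long *pfn, size_t num_pages)
{
	size_t i = 0;

	while (i < num_pages) {
		size_t j, npages = 1;

		for (j = i + 1; j < num_pages; j++) {
			if (pfn[j] != pfn[i] + (j - i))
				break;           /* contiguous run ends here */
			npages++;
		}

		printf("sync pfn %lu..%lu (%zu bytes)\n",
		       pfn[i], pfn[i] + npages - 1, npages * (size_t)PAGE_SIZE);
		i += npages;
	}
}

int main(void)
{
	unsigned long pfn[] = { 100, 101, 102, 200, 300, 301 };
	sync_ranges(pfn, sizeof(pfn) / sizeof(pfn[0]));
	return 0;
}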
669 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_add_io_reserve_lru()
672 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_bo_add_io_reserve_lru()
673 list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru); in nouveau_bo_add_io_reserve_lru()
674 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_bo_add_io_reserve_lru()
679 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_del_io_reserve_lru()
682 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_bo_del_io_reserve_lru()
683 list_del_init(&nvbo->io_reserve_lru); in nouveau_bo_del_io_reserve_lru()
684 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_bo_del_io_reserve_lru()
694 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx); in nouveau_bo_validate()
707 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_wr16()
721 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_rd32()
735 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_wr32()
749 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_ttm_tt_create()
751 if (drm->agp.bridge) { in nouveau_ttm_tt_create()
752 return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags); in nouveau_ttm_tt_create()
767 return -EINVAL; in nouveau_ttm_tt_bind()
769 if (drm->agp.bridge) in nouveau_ttm_tt_bind()
781 if (drm->agp.bridge) { in nouveau_ttm_tt_unbind()
794 switch (bo->resource->mem_type) { in nouveau_bo_evict_flags()
804 *pl = nvbo->placement; in nouveau_bo_evict_flags()
811 struct nouveau_mem *old_mem = nouveau_mem(bo->resource); in nouveau_bo_move_prep()
813 struct nvif_vmm *vmm = &drm->client.vmm.vmm; in nouveau_bo_move_prep()
816 ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0, in nouveau_bo_move_prep()
817 old_mem->mem.size, &old_mem->vma[0]); in nouveau_bo_move_prep()
821 ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0, in nouveau_bo_move_prep()
822 new_mem->mem.size, &old_mem->vma[1]); in nouveau_bo_move_prep()
826 ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]); in nouveau_bo_move_prep()
830 ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]); in nouveau_bo_move_prep()
833 nvif_vmm_put(vmm, &old_mem->vma[1]); in nouveau_bo_move_prep()
834 nvif_vmm_put(vmm, &old_mem->vma[0]); in nouveau_bo_move_prep()
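nouveau_bo_move_prep() acquires two temporary VMM windows (old_mem->vma[0] and [1]), maps the source and destination memory into them, and on any failure releases both windows before returning. A sketch of that acquire-then-unwind shape, with stub functions standing in for the nvif_vmm_get()/nouveau_mem_map()/nvif_vmm_put() calls:

#include <stdbool.h>
#include <stdio.h>

/* Stubs for the nvif calls; 'held' mimics the fact that releasing a
 * window that was never acquired is harmless. */
static bool held[2];
static int  vmm_get(int i) { held[i] = true; printf("get vma[%d]\n", i); return 0; }
static int  mem_map(int i) { printf("map vma[%d]\n", i); return i == 1 ? -1 : 0; }
static void vmm_put(int i) { if (held[i]) { held[i] = false; printf("put vma[%d]\n", i); } }

static int move_prep(void)
{
	int ret;

	ret = vmm_get(0);            /* window for the source */
	if (ret)
		goto done;
	ret = vmm_get(1);            /* window for the destination */
	if (ret)
		goto done;
	ret = mem_map(0);
	if (ret)
		goto done;
	ret = mem_map(1);            /* fails in this sketch */
done:
	if (ret) {                   /* unwind in reverse order on failure */
		vmm_put(1);
		vmm_put(0);
	}
	return ret;
}

int main(void)
{
	printf("move_prep -> %d\n", move_prep());
	return 0;
}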
844 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_move_m2mf()
845 struct nouveau_channel *chan = drm->ttm.chan; in nouveau_bo_move_m2mf()
846 struct nouveau_cli *cli = chan->cli; in nouveau_bo_move_m2mf()
854 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move_m2mf()
860 if (drm_drv_uses_atomic_modeset(drm->dev)) in nouveau_bo_move_m2mf()
861 mutex_lock(&cli->mutex); in nouveau_bo_move_m2mf()
863 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); in nouveau_bo_move_m2mf()
865 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible); in nouveau_bo_move_m2mf()
869 ret = drm->ttm.move(chan, bo, bo->resource, new_reg); in nouveau_bo_move_m2mf()
886 ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, in nouveau_bo_move_m2mf()
891 mutex_unlock(&cli->mutex); in nouveau_bo_move_m2mf()
939 if (mthd->engine) in nouveau_bo_move_init()
940 chan = drm->cechan; in nouveau_bo_move_init()
942 chan = drm->channel; in nouveau_bo_move_init()
946 ret = nvif_object_ctor(&chan->user, "ttmBoMove", in nouveau_bo_move_init()
947 mthd->oclass | (mthd->engine << 16), in nouveau_bo_move_init()
948 mthd->oclass, NULL, 0, in nouveau_bo_move_init()
949 &drm->ttm.copy); in nouveau_bo_move_init()
951 ret = mthd->init(chan, drm->ttm.copy.handle); in nouveau_bo_move_init()
953 nvif_object_dtor(&drm->ttm.copy); in nouveau_bo_move_init()
957 drm->ttm.move = mthd->exec; in nouveau_bo_move_init()
958 drm->ttm.chan = chan; in nouveau_bo_move_init()
959 name = mthd->name; in nouveau_bo_move_init()
962 } while ((++mthd)->exec); in nouveau_bo_move_init()
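nouveau_bo_move_init() walks a table of copy methods (class, engine, init and exec callbacks), constructs each class on the copy-engine or graphics channel, and stops at the first entry whose init succeeds, recording its exec hook as drm->ttm.move. A sketch of that first-match probe over a sentinel-terminated table; the entries below are hypothetical:

#include <stdio.h>

struct move_method {
	const char *name;
	int (*init)(void);             /* returns 0 on success */
	void (*exec)(void);            /* copy implementation to use */
};

static int  init_fail(void) { return -1; }
static int  init_ok(void)   { return 0; }
static void exec_copy(void) { printf("copying with the selected engine\n"); }

/* Hypothetical table, newest method first, terminated by a NULL exec. */
static const struct move_method methods[] = {
	{ "NEW_COPY", init_fail, exec_copy },
	{ "OLD_COPY", init_ok,   exec_copy },
	{ NULL, NULL, NULL },
};

int main(void)
{
	const struct move_method *m = methods;
	void (*move)(void) = NULL;

	do {
		if (m->init() == 0) {
			move = m->exec;
			printf("MM: using %s for buffer copies\n", m->name);
			break;
		}
	} while ((++m)->exec);

	if (move)
		move();
	return 0;
}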
976 if (bo->destroy != nouveau_bo_del_ttm) in nouveau_bo_move_ntfy()
981 if (mem && new_reg->mem_type != TTM_PL_SYSTEM && in nouveau_bo_move_ntfy()
982 mem->mem.page == nvbo->page) { in nouveau_bo_move_ntfy()
983 list_for_each_entry(vma, &nvbo->vma_list, head) { in nouveau_bo_move_ntfy()
988 list_for_each_entry(vma, &nvbo->vma_list, head) { in nouveau_bo_move_ntfy()
989 ret = dma_resv_wait_timeout(bo->base.resv, in nouveau_bo_move_ntfy()
999 nvbo->offset = (new_reg->start << PAGE_SHIFT); in nouveau_bo_move_ntfy()
1007 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_vm_bind()
1008 struct drm_device *dev = drm->dev; in nouveau_bo_vm_bind()
1010 u64 offset = new_reg->start << PAGE_SHIFT; in nouveau_bo_vm_bind()
1013 if (new_reg->mem_type != TTM_PL_VRAM) in nouveau_bo_vm_bind()
1016 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { in nouveau_bo_vm_bind()
1017 *new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size, in nouveau_bo_vm_bind()
1018 nvbo->mode, nvbo->zeta); in nouveau_bo_vm_bind()
1029 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_vm_cleanup()
1030 struct drm_device *dev = drm->dev; in nouveau_bo_vm_cleanup()
1034 ret = dma_resv_get_singleton(bo->base.resv, DMA_RESV_USAGE_WRITE, in nouveau_bo_vm_cleanup()
1037 dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_WRITE, in nouveau_bo_vm_cleanup()
1050 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_move()
1052 struct ttm_resource *old_reg = bo->resource; in nouveau_bo_move()
1057 if (new_reg->mem_type == TTM_PL_TT) { in nouveau_bo_move()
1058 ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg); in nouveau_bo_move()
1068 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move()
1075 if (!old_reg || (old_reg->mem_type == TTM_PL_SYSTEM && in nouveau_bo_move()
1076 !bo->ttm)) { in nouveau_bo_move()
1081 if (old_reg->mem_type == TTM_PL_SYSTEM && in nouveau_bo_move()
1082 new_reg->mem_type == TTM_PL_TT) { in nouveau_bo_move()
1087 if (old_reg->mem_type == TTM_PL_TT && in nouveau_bo_move()
1088 new_reg->mem_type == TTM_PL_SYSTEM) { in nouveau_bo_move()
1089 nouveau_ttm_tt_unbind(bo->bdev, bo->ttm); in nouveau_bo_move()
1090 ttm_resource_free(bo, &bo->resource); in nouveau_bo_move()
1096 if (drm->ttm.move) { in nouveau_bo_move()
1097 if ((old_reg->mem_type == TTM_PL_SYSTEM && in nouveau_bo_move()
1098 new_reg->mem_type == TTM_PL_VRAM) || in nouveau_bo_move()
1099 (old_reg->mem_type == TTM_PL_VRAM && in nouveau_bo_move()
1100 new_reg->mem_type == TTM_PL_SYSTEM)) { in nouveau_bo_move()
1101 hop->fpfn = 0; in nouveau_bo_move()
1102 hop->lpfn = 0; in nouveau_bo_move()
1103 hop->mem_type = TTM_PL_TT; in nouveau_bo_move()
1104 hop->flags = 0; in nouveau_bo_move()
1105 return -EMULTIHOP; in nouveau_bo_move()
1110 ret = -ENODEV; in nouveau_bo_move()
1118 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move()
1122 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile); in nouveau_bo_move()
1126 nouveau_bo_move_ntfy(bo, bo->resource); in nouveau_bo_move()
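When an accelerated copy path exists (drm->ttm.move is set) but the request is a direct SYSTEM-to-VRAM or VRAM-to-SYSTEM move, nouveau_bo_move() fills the hop placement with TTM_PL_TT and returns -EMULTIHOP so TTM retries the move in two stages through GART. A sketch of that decision using stand-in placement enums instead of the TTM types:

#include <errno.h>
#include <stdio.h>

enum pl { PL_SYSTEM, PL_TT, PL_VRAM };

struct hop { enum pl mem_type; unsigned fpfn, lpfn, flags; };

/* Direct SYSTEM<->VRAM copies are not possible for the copy engine, so
 * ask the caller to bounce through TT (GART) first. */
static int plan_move(enum pl from, enum pl to, struct hop *hop)
{
	if ((from == PL_SYSTEM && to == PL_VRAM) ||
	    (from == PL_VRAM && to == PL_SYSTEM)) {
		hop->fpfn = 0;
		hop->lpfn = 0;
		hop->mem_type = PL_TT;
		hop->flags = 0;
		return -EMULTIHOP;
	}
	return 0;
}

int main(void)
{
	struct hop hop;
	int ret = plan_move(PL_SYSTEM, PL_VRAM, &hop);

	printf("ret=%d hop.mem_type=%d\n", ret, hop.mem_type);
	return 0;
}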
1137 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { in nouveau_ttm_io_mem_free_locked()
1138 switch (reg->mem_type) { in nouveau_ttm_io_mem_free_locked()
1140 if (mem->kind) in nouveau_ttm_io_mem_free_locked()
1141 nvif_object_unmap_handle(&mem->mem.object); in nouveau_ttm_io_mem_free_locked()
1144 nvif_object_unmap_handle(&mem->mem.object); in nouveau_ttm_io_mem_free_locked()
1156 struct nvkm_device *device = nvxx_device(&drm->client.device); in nouveau_ttm_io_mem_reserve()
1158 struct nvif_mmu *mmu = &drm->client.mmu; in nouveau_ttm_io_mem_reserve()
1161 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_reserve()
1163 switch (reg->mem_type) { in nouveau_ttm_io_mem_reserve()
1170 if (drm->agp.bridge) { in nouveau_ttm_io_mem_reserve()
1171 reg->bus.offset = (reg->start << PAGE_SHIFT) + in nouveau_ttm_io_mem_reserve()
1172 drm->agp.base; in nouveau_ttm_io_mem_reserve()
1173 reg->bus.is_iomem = !drm->agp.cma; in nouveau_ttm_io_mem_reserve()
1174 reg->bus.caching = ttm_write_combined; in nouveau_ttm_io_mem_reserve()
1177 if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || in nouveau_ttm_io_mem_reserve()
1178 !mem->kind) { in nouveau_ttm_io_mem_reserve()
1185 reg->bus.offset = (reg->start << PAGE_SHIFT) + in nouveau_ttm_io_mem_reserve()
1186 device->func->resource_addr(device, 1); in nouveau_ttm_io_mem_reserve()
1187 reg->bus.is_iomem = true; in nouveau_ttm_io_mem_reserve()
1190 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && in nouveau_ttm_io_mem_reserve()
1191 mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED) in nouveau_ttm_io_mem_reserve()
1192 reg->bus.caching = ttm_uncached; in nouveau_ttm_io_mem_reserve()
1194 reg->bus.caching = ttm_write_combined; in nouveau_ttm_io_mem_reserve()
1196 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { in nouveau_ttm_io_mem_reserve()
1204 switch (mem->mem.object.oclass) { in nouveau_ttm_io_mem_reserve()
1208 args.nv50.kind = mem->kind; in nouveau_ttm_io_mem_reserve()
1209 args.nv50.comp = mem->comp; in nouveau_ttm_io_mem_reserve()
1215 args.gf100.kind = mem->kind; in nouveau_ttm_io_mem_reserve()
1223 ret = nvif_object_map_handle(&mem->mem.object, in nouveau_ttm_io_mem_reserve()
1228 ret = -EINVAL; in nouveau_ttm_io_mem_reserve()
1232 reg->bus.offset = handle; in nouveau_ttm_io_mem_reserve()
1237 ret = -EINVAL; in nouveau_ttm_io_mem_reserve()
1241 if (ret == -ENOSPC) { in nouveau_ttm_io_mem_reserve()
1244 nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru, in nouveau_ttm_io_mem_reserve()
1248 list_del_init(&nvbo->io_reserve_lru); in nouveau_ttm_io_mem_reserve()
1249 drm_vma_node_unmap(&nvbo->bo.base.vma_node, in nouveau_ttm_io_mem_reserve()
1250 bdev->dev_mapping); in nouveau_ttm_io_mem_reserve()
1251 nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource); in nouveau_ttm_io_mem_reserve()
1252 nvbo->bo.resource->bus.offset = 0; in nouveau_ttm_io_mem_reserve()
1253 nvbo->bo.resource->bus.addr = NULL; in nouveau_ttm_io_mem_reserve()
1258 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_reserve()
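On -ENOSPC, nouveau_ttm_io_mem_reserve() evicts the least recently used entry from drm->ttm.io_reserve_lru, unmaps its CPU mappings, frees its BAR mapping, and retries the reservation; the error only propagates once the LRU is empty. A simplified retry loop over a toy fixed-size BAR, with hypothetical helpers standing in for the unmap/free calls:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_SLOTS 4

/* Toy model: the mappable BAR has NR_SLOTS windows; lru[] lists which
 * buffer currently owns each window, oldest first. */
static int lru[NR_SLOTS] = { 10, 11, 12, 13 };
static int lru_len = NR_SLOTS;

static int try_reserve(int bo)
{
	if (lru_len == NR_SLOTS)
		return -ENOSPC;           /* no free window */
	lru[lru_len++] = bo;              /* take a window, becomes most recent */
	return 0;
}

static bool evict_lru_one(void)
{
	if (!lru_len)
		return false;
	printf("evicting bo %d from the BAR\n", lru[0]);
	for (int i = 1; i < lru_len; i++) /* drop the least recently used */
		lru[i - 1] = lru[i];
	lru_len--;
	return true;
}

int main(void)
{
	int ret;

retry:
	ret = try_reserve(42);
	if (ret == -ENOSPC && evict_lru_one())
		goto retry;

	printf("reserve bo 42 -> %d\n", ret);
	return 0;
}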
1267 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_free()
1269 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_free()
1274 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_ttm_fault_reserve_notify()
1276 struct nvkm_device *device = nvxx_device(&drm->client.device); in nouveau_ttm_fault_reserve_notify()
1277 u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT; in nouveau_ttm_fault_reserve_notify()
1283 if (bo->resource->mem_type != TTM_PL_VRAM) { in nouveau_ttm_fault_reserve_notify()
1284 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || in nouveau_ttm_fault_reserve_notify()
1285 !nvbo->kind) in nouveau_ttm_fault_reserve_notify()
1288 if (bo->resource->mem_type != TTM_PL_SYSTEM) in nouveau_ttm_fault_reserve_notify()
1295 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA || in nouveau_ttm_fault_reserve_notify()
1296 bo->resource->start + PFN_UP(bo->resource->size) < mappable) in nouveau_ttm_fault_reserve_notify()
1299 for (i = 0; i < nvbo->placement.num_placement; ++i) { in nouveau_ttm_fault_reserve_notify()
1300 nvbo->placements[i].fpfn = 0; in nouveau_ttm_fault_reserve_notify()
1301 nvbo->placements[i].lpfn = mappable; in nouveau_ttm_fault_reserve_notify()
1304 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { in nouveau_ttm_fault_reserve_notify()
1305 nvbo->busy_placements[i].fpfn = 0; in nouveau_ttm_fault_reserve_notify()
1306 nvbo->busy_placements[i].lpfn = mappable; in nouveau_ttm_fault_reserve_notify()
1313 if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS)) in nouveau_ttm_fault_reserve_notify()
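nouveau_ttm_fault_reserve_notify() only intervenes for pre-Tesla VRAM objects that extend beyond the CPU-mappable BAR window: it clamps every placement (and busy placement) to [0, mappable) and revalidates, so the object migrates into the window before the fault completes. A sketch of the clamp itself, with a hypothetical BAR size:

#include <stdio.h>

struct placement { unsigned fpfn, lpfn; };

/* Force every allowed placement into the CPU-mappable first part of VRAM. */
static void clamp_to_mappable(struct placement *pl, int n, unsigned mappable_pages)
{
	for (int i = 0; i < n; i++) {
		pl[i].fpfn = 0;
		pl[i].lpfn = mappable_pages;
	}
}

int main(void)
{
	struct placement pl[2] = { { 0, ~0u }, { 4096, ~0u } };

	clamp_to_mappable(pl, 2, 65536);   /* e.g. a 256 MiB BAR of 4 KiB pages */
	for (int i = 0; i < 2; i++)
		printf("placement %d: [%u, %u)\n", i, pl[i].fpfn, pl[i].lpfn);
	return 0;
}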
1328 bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL); in nouveau_ttm_tt_populate()
1333 if (slave && ttm->sg) { in nouveau_ttm_tt_populate()
1334 drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address, in nouveau_ttm_tt_populate()
1335 ttm->num_pages); in nouveau_ttm_tt_populate()
1341 return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx); in nouveau_ttm_tt_populate()
1349 bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL); in nouveau_ttm_tt_unpopulate()
1358 return ttm_pool_free(&drm->ttm.bdev.pool, ttm); in nouveau_ttm_tt_unpopulate()
1367 if (drm->agp.bridge) { in nouveau_ttm_tt_destroy()
1378 struct dma_resv *resv = nvbo->bo.base.resv; in nouveau_bo_fence()
1383 dma_resv_add_fence(resv, &fence->base, exclusive ? in nouveau_bo_fence()