/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>

#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
			       struct ttm_resource *reg);
static void nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm);

/*
 * NV10-NV40 tiling helpers
 */

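/* Program one of the memory controller's fixed tiling regions.  A zero
 * pitch tears the region down, a non-zero pitch (re)initialises it for
 * the given address range; any fence still guarding the region is
 * dropped first.
 */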
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nvkm_fb_tile *tile = &fb->tile.region[i];

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		nvkm_fb_tile_fini(fb, i, tile);

	if (pitch)
		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

	nvkm_fb_tile_prog(fb, i, tile);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct dma_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 zeta)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < fb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && fb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	WARN_ON(nvbo->bo.pin_count > 0);
	nouveau_bo_del_io_reserve_lru(bo);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);

	/*
	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
	 * initialized, so don't attempt to release it.
	 */
	if (bo->base.dev)
		drm_gem_object_release(&bo->base);

	kfree(nvbo);
}

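/* Round @x up to the next multiple of @y.  do_div() is used rather than
 * the plain '/' operator so the 64-bit division also links on 32-bit
 * platforms.
 */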
static inline u64
roundup_64(u64 x, u32 y)
{
	x += y - 1;
	do_div(x, y);
	return x * y;
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->client.device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup_64(*size, 32 * nvbo->mode);
			}
		}
	} else {
		*size = roundup_64(*size, (1 << nvbo->page));
		*align = max((1 << nvbo->page), *align);
	}

	*size = roundup_64(*size, PAGE_SIZE);
}

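/* Allocate and pre-configure a nouveau_bo; the backing TTM object is
 * created later by nouveau_bo_init().  This is where the buffer's
 * memory kind, compression, tiling mode and GPU page size are decided,
 * and *size and *align are adjusted to suit, which is why both are
 * passed by pointer.
 */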
struct nouveau_bo *
nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
		 u32 tile_mode, u32 tile_flags)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	struct nvif_mmu *mmu = &cli->mmu;
	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
	int i, pi = -1;

	if (!*size) {
		NV_WARN(drm, "skipped size %016llx\n", *size);
		return ERR_PTR(-EINVAL);
	}

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->bo.bdev = &drm->ttm.bdev;

	/* This is confusing, and doesn't actually mean we want an uncached
	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
	 * into in nouveau_gem_new().
	 */
	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
		/* Determine if we can get a cache-coherent map, forcing
		 * uncached mapping if we can't.
		 */
		if (!nouveau_drm_use_coherent_gpu_mapping(drm))
			nvbo->force_coherent = true;
	}

	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}

		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
	} else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
		nvbo->comp = (tile_flags & 0x00030000) >> 16;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}
	} else {
		nvbo->zeta = (tile_flags & 0x00000007);
	}
	nvbo->mode = tile_mode;
	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);

	/* Determine the desirable target GPU page size for the buffer. */
	for (i = 0; i < vmm->page_nr; i++) {
		/* Because we cannot currently allow VMM maps to fail
		 * during buffer migration, we need to determine page
		 * size for the buffer up-front, and pre-allocate its
		 * page tables.
		 *
		 * Skip page sizes that can't support needed domains.
		 */
		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
		    (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
			continue;
		if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
			continue;

		/* Select this page size if it's the first that supports
		 * the potential memory domains, or when it's compatible
		 * with the requested compression settings.
		 */
		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
			pi = i;

		/* Stop once the buffer is larger than the current page size. */
		if (*size >= 1ULL << vmm->page[i].shift)
			break;
	}

	if (WARN_ON(pi < 0)) {
		kfree(nvbo);
		return ERR_PTR(-EINVAL);
	}

	/* Disable compression if suitable settings couldn't be found. */
	if (nvbo->comp && !vmm->page[pi].comp) {
		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
			nvbo->kind = mmu->kind[nvbo->kind];
		nvbo->comp = 0;
	}
	nvbo->page = vmm->page[pi].shift;

	nouveau_bo_fixup_align(nvbo, align, size);

	return nvbo;
}

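/* Create the TTM buffer object backing an nvbo from nouveau_bo_alloc().
 * On failure TTM calls nouveau_bo_del_ttm(), which frees the nvbo, so
 * callers must not free it themselves.
 */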
int
nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
		struct sg_table *sg, struct dma_resv *robj)
{
	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
	size_t acc_size;
	int ret;

	acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));

	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, domain, 0);
	INIT_LIST_HEAD(&nvbo->io_reserve_lru);

	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
			  &nvbo->placement, align >> PAGE_SHIFT, false,
			  acc_size, sg, robj, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	return 0;
}

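/* Convenience wrapper around nouveau_bo_alloc() + nouveau_bo_init().
 * Minimal usage sketch (illustrative values, error handling elided):
 *
 *	struct nouveau_bo *nvbo;
 *	int ret = nouveau_bo_new(cli, 0x1000, 0, NOUVEAU_GEM_DOMAIN_GART,
 *				 0, 0, NULL, NULL, &nvbo);
 */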
int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
	       uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct dma_resv *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
	if (ret)
		return ret;

	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain)
{
	*n = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
		pl[*n].mem_type = TTM_PL_VRAM;
		pl[*n].flags = 0;
		(*n)++;
	}
	if (domain & NOUVEAU_GEM_DOMAIN_GART) {
		pl[*n].mem_type = TTM_PL_TT;
		pl[*n].flags = 0;
		(*n)++;
	}
	if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
		pl[*n].mem_type = TTM_PL_SYSTEM;
		pl[*n].flags = 0;
		(*n)++;
	}
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
	unsigned i, fpfn, lpfn;

	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->zeta) {
			fpfn = vram_pages / 2;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = vram_pages / 2;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
			 uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement, domain);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   domain | busy);

	set_placement_range(nvbo, domain);
}

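/* Pin a buffer into @domain.  Pinning an already-pinned buffer only
 * bumps the pin count, but returns -EBUSY if the current placement
 * doesn't satisfy @domain.  On TESLA and newer, @contig may force an
 * evict-and-revalidate so the VRAM allocation ends up contiguous.
 */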
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	bool force = false, evict = false;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
		if (!nvbo->contig) {
			nvbo->contig = true;
			force = true;
			evict = true;
		}
	}

	if (nvbo->bo.pin_count) {
		bool error = evict;

		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
			break;
		case TTM_PL_TT:
			error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
			break;
		default:
			break;
		}

		if (error) {
			NV_ERROR(drm, "bo %p pinned elsewhere: "
				      "0x%08x vs 0x%08x\n", bo,
				 bo->mem.mem_type, domain);
			ret = -EBUSY;
		}
		ttm_bo_pin(&nvbo->bo);
		goto out;
	}

	if (evict) {
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
		ret = nouveau_bo_validate(nvbo, false, false);
		if (ret)
			goto out;
	}

	nouveau_bo_placement_set(nvbo, domain, 0);
	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret)
		goto out;

	ttm_bo_pin(&nvbo->bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		drm->gem.vram_available -= bo->base.size;
		break;
	case TTM_PL_TT:
		drm->gem.gart_available -= bo->base.size;
		break;
	default:
		break;
	}

out:
	if (force && ret)
		nvbo->contig = false;
	ttm_bo_unreserve(bo);
	return ret;
}

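/* Drop one pin reference.  VRAM/GART accounting is only returned once
 * the pin count reaches zero.
 */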
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	ttm_bo_unpin(&nvbo->bo);
	if (!nvbo->bo.pin_count) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->base.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->base.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return 0;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);

	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (!nvbo)
		return;

	ttm_bo_kunmap(&nvbo->kmap);
}

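/* Flush CPU caches so the device observes up-to-date buffer contents.
 * Runs of physically contiguous pages are coalesced into a single
 * dma_sync_single_for_device() call to keep the sync count down.
 */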
void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
	int i, j;

	if (!ttm_dma)
		return;
	if (!ttm_dma->pages) {
		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
		return;
	}

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	i = 0;
	while (i < ttm_dma->num_pages) {
		struct page *p = ttm_dma->pages[i];
		size_t num_pages = 1;

		for (j = i + 1; j < ttm_dma->num_pages; ++j) {
			if (++p != ttm_dma->pages[j])
				break;

			++num_pages;
		}
		dma_sync_single_for_device(drm->dev->dev,
					   ttm_dma->dma_address[i],
					   num_pages * PAGE_SIZE, DMA_TO_DEVICE);
		i += num_pages;
	}
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
	int i, j;

	if (!ttm_dma)
		return;
	if (!ttm_dma->pages) {
		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
		return;
	}

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	i = 0;
	while (i < ttm_dma->num_pages) {
		struct page *p = ttm_dma->pages[i];
		size_t num_pages = 1;

		for (j = i + 1; j < ttm_dma->num_pages; ++j) {
			if (++p != ttm_dma->pages[j])
				break;

			++num_pages;
		}

		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
					num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
		i += num_pages;
	}
}

void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_del_init(&nvbo->io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
	if (ret)
		return ret;

	nouveau_bo_sync_for_device(nvbo);

	return 0;
}

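/* The nouveau_bo_wr16/rd32/wr32() helpers below access a buffer mapped
 * via nouveau_bo_map(), using I/O accessors or plain loads/stores
 * depending on where the kmap landed.
 */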
void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);

	if (drm->agp.bridge) {
		return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
	}
#endif

	return nouveau_sgdma_create_ttm(bo, page_flags);
}

static int
nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
		    struct ttm_resource *reg)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
#endif
	if (!reg)
		return -EINVAL;
#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge)
		return ttm_agp_bind(ttm, reg);
#endif
	return nouveau_sgdma_bind(bdev, ttm, reg);
}

static void
nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	if (drm->agp.bridge) {
		ttm_agp_unbind(ttm);
		return;
	}
#endif
	nouveau_sgdma_unbind(bdev, ttm);
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
					 NOUVEAU_GEM_DOMAIN_CPU);
		break;
	default:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
		break;
	}

	*pl = nvbo->placement;
}

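/* Prepare a TESLA+ buffer move: vma[0] maps the old backing store,
 * vma[1] the new one.  Both are attached to the *old* nouveau_mem so
 * they are torn down only after TTM destroys that resource following
 * the copy.
 */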
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_resource *reg)
{
	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
	struct nouveau_mem *new_mem = nouveau_mem(reg);
	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
	int ret;

	ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
			   old_mem->mem.size, &old_mem->vma[0]);
	if (ret)
		return ret;

	ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
			   new_mem->mem.size, &old_mem->vma[1]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
done:
	if (ret) {
		nvif_vmm_put(vmm, &old_mem->vma[1]);
		nvif_vmm_put(vmm, &old_mem->vma[0]);
	}
	return ret;
}

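/* Copy a buffer between placements on the channel picked by
 * nouveau_bo_move_init().  The client mutex serialises channel access,
 * and the resulting fence is handed to TTM so the old backing store
 * stays alive until the copy completes.
 */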
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
		     struct ttm_operation_ctx *ctx,
		     struct ttm_resource *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nvkm_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_resource
	 */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_reg);
		if (ret)
			return ret;
	}

	if (drm_drv_uses_atomic_modeset(drm->dev))
		mutex_lock(&cli->mutex);
	else
		mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict, false,
								new_reg);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}

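/* Probe for the most capable copy method available.  The table below is
 * ordered newest class first; the first entry whose object can be
 * constructed and initialised on a channel wins, with CPU copies as the
 * fallback when nothing matches.
 */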
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct _method_table {
		const char *name;
		int engine;
		s32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_resource *, struct ttm_resource *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
	};
	const struct _method_table *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_ctor(&chan->user, "ttmBoMove",
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_dtor(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

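/* Keep GPU virtual mappings in sync with a placement change: remap each
 * VMA when the new resource is compatible (non-system, same page size),
 * otherwise wait for idle and unmap them.
 */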
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
		     struct ttm_resource *new_reg)
{
	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	nouveau_bo_del_io_reserve_lru(bo);

	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
	    mem->mem.page == nvbo->page) {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			nouveau_vma_map(vma, mem);
		}
	} else {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			WARN_ON(ttm_bo_wait(bo, false, false));
			nouveau_vma_unmap(vma);
		}
	}

	if (new_reg) {
		if (new_reg->mm_node)
			nvbo->offset = (new_reg->start << PAGE_SHIFT);
		else
			nvbo->offset = 0;
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_reg->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_reg->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size,
					       nvbo->mode, nvbo->zeta);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}

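/* Main TTM move callback.  Moves that need no data transfer
 * (system<->TT) complete via ttm_bo_move_null()/ttm_bo_assign_mem(),
 * direct system<->VRAM transfers are bounced through TT by returning
 * -EMULTIHOP, and everything else tries the hardware copy engine before
 * falling back to a CPU memcpy.
 */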
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
		struct ttm_operation_ctx *ctx,
		struct ttm_resource *new_reg,
		struct ttm_place *hop)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_resource *old_reg = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (new_reg->mem_type == TTM_PL_TT) {
		ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
		if (ret)
			return ret;
	}

	nouveau_bo_move_ntfy(bo, evict, new_reg);
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		goto out_ntfy;

	if (nvbo->bo.pin_count)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
		if (ret)
			goto out_ntfy;
	}

	/* Fake bo copy. */
	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		ttm_bo_move_null(bo, new_reg);
		goto out;
	}

	if (old_reg->mem_type == TTM_PL_SYSTEM &&
	    new_reg->mem_type == TTM_PL_TT) {
		ttm_bo_move_null(bo, new_reg);
		goto out;
	}

	if (old_reg->mem_type == TTM_PL_TT &&
	    new_reg->mem_type == TTM_PL_SYSTEM) {
		nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->mem);
		ttm_bo_assign_mem(bo, new_reg);
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if ((old_reg->mem_type == TTM_PL_SYSTEM &&
		     new_reg->mem_type == TTM_PL_VRAM) ||
		    (old_reg->mem_type == TTM_PL_VRAM &&
		     new_reg->mem_type == TTM_PL_SYSTEM)) {
			hop->fpfn = 0;
			hop->lpfn = 0;
			hop->mem_type = TTM_PL_TT;
			hop->flags = 0;
			return -EMULTIHOP;
		}
		ret = nouveau_bo_move_m2mf(bo, evict, ctx,
					   new_reg);
	} else
		ret = -ENODEV;

	if (ret) {
		/* Fallback to software copy. */
		ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
	}

out:
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}
out_ntfy:
	if (ret) {
		swap(*new_reg, bo->mem);
		nouveau_bo_move_ntfy(bo, false, new_reg);
		swap(*new_reg, bo->mem);
	}
	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
					  filp->private_data);
}

static void
nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
			       struct ttm_resource *reg)
{
	struct nouveau_mem *mem = nouveau_mem(reg);

	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
		switch (reg->mem_type) {
		case TTM_PL_TT:
			if (mem->kind)
				nvif_object_unmap_handle(&mem->mem.object);
			break;
		case TTM_PL_VRAM:
			nvif_object_unmap_handle(&mem->mem.object);
			break;
		default:
			break;
		}
	}
}

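/* Map a resource for CPU access through the BAR where required.  If BAR
 * space runs out (-ENOSPC), the least recently used io-reserved buffer
 * is unmapped and the reservation retried.
 */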
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nouveau_mem *mem = nouveau_mem(reg);
	struct nvif_mmu *mmu = &drm->client.mmu;
	int ret;

	mutex_lock(&drm->ttm.io_reserve_mutex);
retry:
	switch (reg->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		ret = 0;
		goto out;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (drm->agp.bridge) {
			reg->bus.offset = (reg->start << PAGE_SHIFT) +
				drm->agp.base;
			reg->bus.is_iomem = !drm->agp.cma;
			reg->bus.caching = ttm_write_combined;
		}
#endif
		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
		    !mem->kind) {
			/* untiled */
			ret = 0;
			break;
		}
		fallthrough;	/* tiled memory */
	case TTM_PL_VRAM:
		reg->bus.offset = (reg->start << PAGE_SHIFT) +
			device->func->resource_addr(device, 1);
		reg->bus.is_iomem = true;

		/* Some BARs do not support being ioremapped WC */
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
		    mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED)
			reg->bus.caching = ttm_uncached;
		else
			reg->bus.caching = ttm_write_combined;

		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
			union {
				struct nv50_mem_map_v0 nv50;
				struct gf100_mem_map_v0 gf100;
			} args;
			u64 handle, length;
			u32 argc = 0;

			switch (mem->mem.object.oclass) {
			case NVIF_CLASS_MEM_NV50:
				args.nv50.version = 0;
				args.nv50.ro = 0;
				args.nv50.kind = mem->kind;
				args.nv50.comp = mem->comp;
				argc = sizeof(args.nv50);
				break;
			case NVIF_CLASS_MEM_GF100:
				args.gf100.version = 0;
				args.gf100.ro = 0;
				args.gf100.kind = mem->kind;
				argc = sizeof(args.gf100);
				break;
			default:
				WARN_ON(1);
				break;
			}

			ret = nvif_object_map_handle(&mem->mem.object,
						     &args, argc,
						     &handle, &length);
			if (ret != 1) {
				if (WARN_ON(ret == 0))
					ret = -EINVAL;
				goto out;
			}

			reg->bus.offset = handle;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

out:
	if (ret == -ENOSPC) {
		struct nouveau_bo *nvbo;

		nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
						typeof(*nvbo),
						io_reserve_lru);
		if (nvbo) {
			list_del_init(&nvbo->io_reserve_lru);
			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
					   bdev->dev_mapping);
			nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
			goto retry;
		}
	}
	mutex_unlock(&drm->ttm.io_reserve_mutex);
	return ret;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	nouveau_ttm_io_mem_free_locked(drm, reg);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

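/* Fault-time helper: make sure the buffer ends up somewhere the CPU can
 * actually reach.  Tiled (kind != 0) buffers on TESLA+ are moved to
 * GART; pre-TESLA VRAM buffers are restricted to the mappable window of
 * the BAR before revalidation.
 */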
vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nvbo->kind)
			return 0;

		if (bo->mem.mem_type != TTM_PL_SYSTEM)
			return 0;

		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);

	} else {
		/* make sure bo is in mappable vram */
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
		    bo->mem.start + bo->mem.num_pages < mappable)
			return 0;

		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = 0;
			nvbo->placements[i].lpfn = mappable;
		}

		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = 0;
			nvbo->busy_placements[i].lpfn = mappable;
		}

		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
	}

	ret = nouveau_bo_validate(nvbo, false, false);
	if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(ret))
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

static int
nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (slave && ttm->sg) {
		drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address,
					       ttm->num_pages);
		return 0;
	}

	drm = nouveau_bdev(bdev);

	return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx);
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
			  struct ttm_tt *ttm)
{
	struct nouveau_drm *drm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(bdev);

	ttm_pool_free(&drm->ttm.bdev.pool, ttm);
}

static void
nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev,
		       struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	if (drm->agp.bridge) {
		ttm_agp_unbind(ttm);
		ttm_tt_destroy_common(bdev, ttm);
		ttm_agp_destroy(ttm);
		return;
	}
#endif
	nouveau_sgdma_destroy(bdev, ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct dma_resv *resv = nvbo->bo.base.resv;

	if (exclusive)
		dma_resv_add_excl_fence(resv, &fence->base);
	else if (fence)
		dma_resv_add_shared_fence(resv, &fence->base);
}

static void
nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	nouveau_bo_move_ntfy(bo, false, NULL);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.ttm_tt_destroy = &nouveau_ttm_tt_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = nouveau_bo_evict_flags,
	.delete_mem_notify = nouveau_bo_delete_mem_notify,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};