/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nvkm_fb_tile *tile = &fb->tile.region[i];

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		nvkm_fb_tile_fini(fb, i, tile);

	if (pitch)
		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

	nvkm_fb_tile_prog(fb, i, tile);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct dma_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

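/*
 * Find a tile region we can (re)program for this buffer: the first free
 * region is kept as the candidate when a pitch is requested, any other
 * free region that still carries stale pitch programming is cleared, and
 * everything else is released again.  (A summary of the loop below, not
 * extra hardware documentation.)
 */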
static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 zeta)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < fb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && fb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	WARN_ON(nvbo->pin_refcnt > 0);
	nouveau_bo_del_io_reserve_lru(bo);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);

	/*
	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
	 * initialized, so don't attempt to release it.
	 */
	if (bo->base.dev)
		drm_gem_object_release(&bo->base);

	kfree(nvbo);
}

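/*
 * Round x up to the next multiple of y, using do_div() so the 64-bit
 * division also works on 32-bit hosts.  For example (values chosen purely
 * for illustration): roundup_64(0x12345, 0x1000) == 0x13000.
 */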
static inline u64
roundup_64(u64 x, u32 y)
{
	x += y - 1;
	do_div(x, y);
	return x * y;
}

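/*
 * Adjust the requested size/alignment to something the chip can actually
 * tile or map: pre-Tesla chips with a tiling mode align to the tile
 * region granularity (e.g. 64KiB and a size that is a multiple of
 * 64 * nvbo->mode on NV40+), Tesla and newer round to the GPU page size
 * chosen in nouveau_bo_alloc(), and everything is finally rounded up to
 * the CPU PAGE_SIZE.  (Derived from the checks below.)
 */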
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, u64 *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->client.device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup_64(*size, 32 * nvbo->mode);
			}
		}
	} else {
		*size = roundup_64(*size, (1 << nvbo->page));
		*align = max((1 << nvbo->page), *align);
	}

	*size = roundup_64(*size, PAGE_SIZE);
}

struct nouveau_bo *
nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
		 u32 tile_mode, u32 tile_flags)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	struct nvif_mmu *mmu = &cli->mmu;
	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
	int i, pi = -1;

	if (!*size) {
		NV_WARN(drm, "skipped size %016llx\n", *size);
		return ERR_PTR(-EINVAL);
	}

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->bo.bdev = &drm->ttm.bdev;

	/* This is confusing, and doesn't actually mean we want an uncached
	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
	 * into in nouveau_gem_new().
	 */
	if (flags & TTM_PL_FLAG_UNCACHED) {
		/* Determine if we can get a cache-coherent map, forcing
		 * uncached mapping if we can't.
		 */
		if (!nouveau_drm_use_coherent_gpu_mapping(drm))
			nvbo->force_coherent = true;
	}

	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}

		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
	} else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
		nvbo->comp = (tile_flags & 0x00030000) >> 16;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}
	} else {
		nvbo->zeta = (tile_flags & 0x00000007);
	}
	nvbo->mode = tile_mode;
	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);

	/* Determine the desirable target GPU page size for the buffer. */
	for (i = 0; i < vmm->page_nr; i++) {
		/* Because we cannot currently allow VMM maps to fail
		 * during buffer migration, we need to determine page
		 * size for the buffer up-front, and pre-allocate its
		 * page tables.
		 *
		 * Skip page sizes that can't support needed domains.
		 */
		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
		    (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
			continue;
		if ((flags & TTM_PL_FLAG_TT) &&
		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
			continue;

		/* Select this page size if it's the first that supports
		 * the potential memory domains, or when it's compatible
		 * with the requested compression settings.
		 */
		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
			pi = i;

		/* Stop once the buffer is larger than the current page size. */
		if (*size >= 1ULL << vmm->page[i].shift)
			break;
	}

	if (WARN_ON(pi < 0))
		return ERR_PTR(-EINVAL);

	/* Disable compression if suitable settings couldn't be found. */
	if (nvbo->comp && !vmm->page[pi].comp) {
		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
			nvbo->kind = mmu->kind[nvbo->kind];
		nvbo->comp = 0;
	}
	nvbo->page = vmm->page[pi].shift;

	nouveau_bo_fixup_align(nvbo, flags, align, size);

	return nvbo;
}

int
nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
		struct sg_table *sg, struct dma_resv *robj)
{
	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
	size_t acc_size;
	int ret;

	acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));

	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);
	INIT_LIST_HEAD(&nvbo->io_reserve_lru);

	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
			  &nvbo->placement, align >> PAGE_SHIFT, false,
			  acc_size, sg, robj, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	return 0;
}

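/*
 * Convenience wrapper combining nouveau_bo_alloc() and nouveau_bo_init()
 * for in-kernel buffers that don't need a GEM object (GEM-backed buffers
 * typically call the two halves separately around drm_gem_object_init()).
 * A rough usage sketch, with size/align picked purely for illustration:
 *
 *	struct nouveau_bo *nvbo;
 *	ret = nouveau_bo_new(&drm->client, 0x1000, 0x1000, TTM_PL_FLAG_VRAM,
 *			     0, 0, NULL, NULL, &nvbo);
 */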
int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct dma_resv *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
	if (ret)
		return ret;

	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
	unsigned i, fpfn, lpfn;

	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->zeta) {
			fpfn = vram_pages / 2;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = vram_pages / 2;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
						 TTM_PL_MASK_CACHING) |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

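/*
 * Pin the buffer into the given memory type and bump pin_refcnt so TTM
 * won't evict or move it; contig additionally requests a contiguous VRAM
 * allocation on Tesla and newer.  Roughly, a scanout buffer would be
 * pinned with nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true) and released
 * again with nouveau_bo_unpin() (illustrative example, not taken from a
 * specific caller).
 */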
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	bool force = false, evict = false;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    memtype == TTM_PL_FLAG_VRAM && contig) {
		if (!nvbo->contig) {
			nvbo->contig = true;
			force = true;
			evict = true;
		}
	}

	if (nvbo->pin_refcnt) {
		if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
			NV_ERROR(drm, "bo %p pinned elsewhere: "
				      "0x%08x vs 0x%08x\n", bo,
				 1 << bo->mem.mem_type, memtype);
			ret = -EBUSY;
		}
		nvbo->pin_refcnt++;
		goto out;
	}

	if (evict) {
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
		ret = nouveau_bo_validate(nvbo, false, false);
		if (ret)
			goto out;
	}

	nvbo->pin_refcnt++;
	nouveau_bo_placement_set(nvbo, memtype, 0);

	/* drop pin_refcnt temporarily, so we don't trip the assertion
	 * in nouveau_bo_move() that makes sure we're not trying to
	 * move a pinned buffer
	 */
	nvbo->pin_refcnt--;
	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret)
		goto out;
	nvbo->pin_refcnt++;

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		drm->gem.vram_available -= bo->mem.size;
		break;
	case TTM_PL_TT:
		drm->gem.gart_available -= bo->mem.size;
		break;
	default:
		break;
	}

out:
	if (force && ret)
		nvbo->contig = false;
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}

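/*
 * Map the whole object into kernel space via ttm_bo_kmap().  The mapping
 * is cached in nvbo->kmap and accessed through nouveau_bo_rd32()/_wr32()
 * and friends below; it must be balanced with nouveau_bo_unmap().
 */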
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);

	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (!nvbo)
		return;

	ttm_bo_kunmap(&nvbo->kmap);
}

void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_device(drm->dev->dev,
					   ttm_dma->dma_address[i],
					   PAGE_SIZE, DMA_TO_DEVICE);
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
					PAGE_SIZE, DMA_FROM_DEVICE);
}

void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_del_init(&nvbo->io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
	if (ret)
		return ret;

	nouveau_bo_sync_for_device(nvbo);

	return 0;
}

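/*
 * Accessors for the kmap set up by nouveau_bo_map().  Note that the index
 * is in units of the access size rather than bytes, so a caller writing a
 * 32-bit word at byte offset 'offset' would roughly do:
 *
 *	nouveau_bo_wr32(nvbo, offset / 4, value);
 */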
void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);

	if (drm->agp.bridge) {
		return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
	}
#endif

	return nouveau_sgdma_create_ttm(bo, page_flags);
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_resource *reg)
{
	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
	struct nouveau_mem *new_mem = nouveau_mem(reg);
	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
	int ret;

	ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
			   old_mem->mem.size, &old_mem->vma[0]);
	if (ret)
		return ret;

	ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
			   new_mem->mem.size, &old_mem->vma[1]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
done:
	if (ret) {
		nvif_vmm_put(vmm, &old_mem->vma[1]);
		nvif_vmm_put(vmm, &old_mem->vma[0]);
	}
	return ret;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_resource *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nvkm_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_resource
	 */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_reg);
		if (ret)
			return ret;
	}

	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict,
								new_reg);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}

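/*
 * Pick the engine/class used for GPU-accelerated buffer moves.  The table
 * below is ordered from most- to least-preferred copy class; for each
 * class the dedicated copy-engine channel (drm->cechan) is tried before
 * the regular kernel channel (drm->channel), and the first class that can
 * be constructed and initialised wins.  If nothing matches, buffer moves
 * fall back to CPU copies.
 */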
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct _method_table {
		const char *name;
		int engine;
		s32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_resource *, struct ttm_resource *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
	};
	const struct _method_table *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_ctor(&chan->user, "ttmBoMove",
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_dtor(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

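/*
 * Moves between VRAM and plain system memory are done in two steps via a
 * temporary GART (TT) placement, since the copy hardware can only reach
 * VRAM and GART-mapped pages: _flipd handles moves towards SYSTEM (GPU
 * copy into the temporary, then ttm_bo_move_ttm() to the final
 * placement), _flips handles moves out of SYSTEM (bind into the temporary
 * first, then GPU copy).  This is a summary of the two helpers below.
 */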
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_resource *new_reg)
{
	struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_resource tmp_reg;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_reg = *new_reg;
	tmp_reg.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg, &ctx);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
out:
	ttm_resource_free(bo, &tmp_reg);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_resource *new_reg)
{
	struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_resource tmp_reg;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_reg = *new_reg;
	tmp_reg.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, &ctx, &tmp_reg);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);

out:
	ttm_resource_free(bo, &tmp_reg);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
		     struct ttm_resource *new_reg)
{
	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	nouveau_bo_del_io_reserve_lru(bo);

	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
	    mem->mem.page == nvbo->page) {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			nouveau_vma_map(vma, mem);
		}
	} else {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			WARN_ON(ttm_bo_wait(bo, false, false));
			nouveau_vma_unmap(vma);
		}
	}

	if (new_reg) {
		if (new_reg->mm_node)
			nvbo->offset = (new_reg->start << PAGE_SHIFT);
		else
			nvbo->offset = 0;
	}

}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_reg->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_reg->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
					       nvbo->mode, nvbo->zeta);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
		struct ttm_operation_ctx *ctx,
		struct ttm_resource *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_resource *old_reg = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	if (nvbo->pin_refcnt)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_reg;
		new_reg->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_reg->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict,
						    ctx->interruptible,
						    ctx->no_wait_gpu, new_reg);
		else if (old_reg->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict,
						    ctx->interruptible,
						    ctx->no_wait_gpu, new_reg);
		else
			ret = nouveau_bo_move_m2mf(bo, evict,
						   ctx->interruptible,
						   ctx->no_wait_gpu, new_reg);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret == 0)
		ret = ttm_bo_move_memcpy(bo, ctx, new_reg);

out:
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
					  filp->private_data);
}

static void
nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
			       struct ttm_resource *reg)
{
	struct nouveau_mem *mem = nouveau_mem(reg);

	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
		switch (reg->mem_type) {
		case TTM_PL_TT:
			if (mem->kind)
				nvif_object_unmap_handle(&mem->mem.object);
			break;
		case TTM_PL_VRAM:
			nvif_object_unmap_handle(&mem->mem.object);
			break;
		default:
			break;
		}
	}
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nouveau_mem *mem = nouveau_mem(reg);
	int ret;

	mutex_lock(&drm->ttm.io_reserve_mutex);
retry:
	switch (reg->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		ret = 0;
		goto out;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (drm->agp.bridge) {
			reg->bus.offset = (reg->start << PAGE_SHIFT) +
				drm->agp.base;
			reg->bus.is_iomem = !drm->agp.cma;
		}
#endif
		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
		    !mem->kind) {
			/* untiled */
			ret = 0;
			break;
		}
		fallthrough;	/* tiled memory */
	case TTM_PL_VRAM:
		reg->bus.offset = (reg->start << PAGE_SHIFT) +
			device->func->resource_addr(device, 1);
		reg->bus.is_iomem = true;
		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
			union {
				struct nv50_mem_map_v0 nv50;
				struct gf100_mem_map_v0 gf100;
			} args;
			u64 handle, length;
			u32 argc = 0;

			switch (mem->mem.object.oclass) {
			case NVIF_CLASS_MEM_NV50:
				args.nv50.version = 0;
				args.nv50.ro = 0;
				args.nv50.kind = mem->kind;
				args.nv50.comp = mem->comp;
				argc = sizeof(args.nv50);
				break;
			case NVIF_CLASS_MEM_GF100:
				args.gf100.version = 0;
				args.gf100.ro = 0;
				args.gf100.kind = mem->kind;
				argc = sizeof(args.gf100);
				break;
			default:
				WARN_ON(1);
				break;
			}

			ret = nvif_object_map_handle(&mem->mem.object,
						     &args, argc,
						     &handle, &length);
			if (ret != 1) {
				if (WARN_ON(ret == 0))
					ret = -EINVAL;
				goto out;
			}

			reg->bus.offset = handle;
			ret = 0;
		}
		break;
	default:
		ret = -EINVAL;
	}

out:
	if (ret == -ENOSPC) {
		struct nouveau_bo *nvbo;

		nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
						typeof(*nvbo),
						io_reserve_lru);
		if (nvbo) {
			list_del_init(&nvbo->io_reserve_lru);
			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
					   bdev->dev_mapping);
			nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
			goto retry;
		}

	}
	mutex_unlock(&drm->ttm.io_reserve_mutex);
	return ret;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	nouveau_ttm_io_mem_free_locked(drm, reg);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nvbo->kind)
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* make sure bo is in mappable vram */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	for (i = 0; i < nvbo->placement.num_placement; ++i) {
		nvbo->placements[i].fpfn = 0;
		nvbo->placements[i].lpfn = mappable;
	}

	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
		nvbo->busy_placements[i].fpfn = 0;
		nvbo->busy_placements[i].lpfn = mappable;
	}

	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

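/*
 * Populate the backing pages of a ttm_tt.  PRIME-imported (SG "slave")
 * objects only have their page/DMA arrays filled from the sg table; AGP
 * systems use the AGP helpers; x86 systems with swiotlb active use the
 * TTM DMA pool; everything else goes through
 * ttm_populate_and_map_pages().  (Summary of the paths below.)
 */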
static int
nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct device *dev;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(bdev);
	dev = drm->dev->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		return ttm_agp_tt_populate(bdev, ttm, ctx);
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev, ctx);
	}
#endif
	return ttm_populate_and_map_pages(dev, ttm_dma, ctx);
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
			  struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct device *dev;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(bdev);
	dev = drm->dev->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		ttm_agp_tt_unpopulate(bdev, ttm);
		return;
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev);
		return;
	}
#endif

	ttm_unmap_and_unpopulate_pages(dev, ttm_dma);
}

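/*
 * Attach a fence to the buffer's reservation object, either as the
 * exclusive (write) fence or as a shared (read) fence.  For example, code
 * that has submitted a GPU write to the buffer would roughly do:
 *
 *	nouveau_bo_fence(nvbo, fence, true);
 */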
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct dma_resv *resv = nvbo->bo.base.resv;

	if (exclusive)
		dma_resv_add_excl_fence(resv, &fence->base);
	else if (fence)
		dma_resv_add_shared_fence(resv, &fence->base);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};