1 /*
2  * Copyright 2007 Dave Airlied
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  */
24 /*
25  * Authors: Dave Airlied <airlied@linux.ie>
26  *	    Ben Skeggs   <darktama@iinet.net.au>
27  *	    Jeremy Kolb  <jkolb@brandeis.edu>
28  */
29 
30 #include <core/engine.h>
31 #include <linux/swiotlb.h>
32 
33 #include <subdev/fb.h>
34 #include <subdev/vm.h>
35 #include <subdev/bar.h>
36 
37 #include "nouveau_drm.h"
38 #include "nouveau_dma.h"
39 #include "nouveau_fence.h"
40 
41 #include "nouveau_bo.h"
42 #include "nouveau_ttm.h"
43 #include "nouveau_gem.h"
44 
45 /*
46  * NV10-NV40 tiling helpers
47  */
48 
/* Reprogram one NV10-NV40 hardware tile region.  Tears down the region's
 * previous configuration (if any), optionally re-initialises it with the
 * new addr/size/pitch/flags, then pushes the state to the fb and any
 * engines (GR/MPEG) that mirror tile configuration.  Passing pitch == 0
 * simply disables the region.
 */
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	/* recover the region index from the pointer's offset into the array */
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	/* any fence guarding the old configuration is no longer needed */
	nouveau_fence_unref(&reg->fence);

	/* non-zero pitch means the region is currently active: finish it */
	if (tile->pitch)
		pfb->tile.fini(pfb, i, tile);

	/* non-zero pitch requested: set up the new tiling parameters */
	if (pitch)
		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	/* engines that cache tile state must be reprogrammed as well */
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}
74 
75 static struct nouveau_drm_tile *
76 nv10_bo_get_tile_region(struct drm_device *dev, int i)
77 {
78 	struct nouveau_drm *drm = nouveau_drm(dev);
79 	struct nouveau_drm_tile *tile = &drm->tile.reg[i];
80 
81 	spin_lock(&drm->tile.lock);
82 
83 	if (!tile->used &&
84 	    (!tile->fence || nouveau_fence_done(tile->fence)))
85 		tile->used = true;
86 	else
87 		tile = NULL;
88 
89 	spin_unlock(&drm->tile.lock);
90 	return tile;
91 }
92 
93 static void
94 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
95 			struct nouveau_fence *fence)
96 {
97 	struct nouveau_drm *drm = nouveau_drm(dev);
98 
99 	if (tile) {
100 		spin_lock(&drm->tile.lock);
101 		if (fence) {
102 			/* Mark it as pending. */
103 			tile->fence = fence;
104 			nouveau_fence_ref(fence);
105 		}
106 
107 		tile->used = false;
108 		spin_unlock(&drm->tile.lock);
109 	}
110 }
111 
/* Allocate and program a tile region for a buffer at @addr.  Scans all
 * hardware regions: the first free one is kept for the caller, any other
 * free-but-still-programmed region is disabled in passing.  Returns the
 * claimed region, or NULL if every region is busy (pitch == 0 also
 * returns NULL since no region is kept).
 */
static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		/* keep the first region we manage to claim (may be NULL
		 * if this slot was busy — then we keep looking) */
		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		/* not keeping this one: release the claim */
		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					    pitch, flags);
	return found;
}
141 
142 static void
143 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
144 {
145 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
146 	struct drm_device *dev = drm->dev;
147 	struct nouveau_bo *nvbo = nouveau_bo(bo);
148 
149 	if (unlikely(nvbo->gem))
150 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
151 	WARN_ON(nvbo->pin_refcnt > 0);
152 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
153 	kfree(nvbo);
154 }
155 
156 static void
157 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
158 		       int *align, int *size)
159 {
160 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
161 	struct nouveau_device *device = nv_device(drm->device);
162 
163 	if (device->card_type < NV_50) {
164 		if (nvbo->tile_mode) {
165 			if (device->chipset >= 0x40) {
166 				*align = 65536;
167 				*size = roundup(*size, 64 * nvbo->tile_mode);
168 
169 			} else if (device->chipset >= 0x30) {
170 				*align = 32768;
171 				*size = roundup(*size, 64 * nvbo->tile_mode);
172 
173 			} else if (device->chipset >= 0x20) {
174 				*align = 16384;
175 				*size = roundup(*size, 64 * nvbo->tile_mode);
176 
177 			} else if (device->chipset >= 0x10) {
178 				*align = 16384;
179 				*size = roundup(*size, 32 * nvbo->tile_mode);
180 			}
181 		}
182 	} else {
183 		*size = roundup(*size, (1 << nvbo->page_shift));
184 		*align = max((1 <<  nvbo->page_shift), *align);
185 	}
186 
187 	*size = roundup(*size, PAGE_SIZE);
188 }
189 
/* Allocate and initialise a new nouveau buffer object.
 *
 * @dev:        DRM device
 * @size:       requested size in bytes (fixed up for tiling/page size)
 * @align:      requested alignment in bytes (may be increased)
 * @flags:      TTM placement flags for the initial placement
 * @tile_mode/@tile_flags: hardware tiling parameters
 * @sg:         optional scatter-gather table (imported buffers)
 * @pnvbo:      receives the new bo on success
 *
 * Returns 0 on success or a negative errno.  On ttm_bo_init() failure
 * the bo has already been destroyed via nouveau_bo_del_ttm().
 */
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;
	int lpg_shift = 12;
	int max_size;

	if (drm->client.base.vm)
		lpg_shift = drm->client.base.vm->vmm->lpg_shift;
	/* cap size so later shifts/roundups cannot overflow an int */
	max_size = INT_MAX & ~((1 << lpg_shift) - 1);

	if (size <= 0 || size > max_size) {
		nv_warn(drm, "skipped size %x\n", (u32)size);
		return -EINVAL;
	}

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	/* use large pages for big VRAM-only buffers when a VM exists */
	nvbo->page_shift = 12;
	if (drm->client.base.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}
251 
252 static void
253 set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
254 {
255 	*n = 0;
256 
257 	if (type & TTM_PL_FLAG_VRAM)
258 		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
259 	if (type & TTM_PL_FLAG_TT)
260 		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
261 	if (type & TTM_PL_FLAG_SYSTEM)
262 		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
263 }
264 
/* On NV10-class hardware, constrain small tiled VRAM buffers to one
 * half of VRAM so colour and depth buffers land on different memory
 * controller partitions (see comment below for the rationale).
 */
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;

	/* only applies to tiled VRAM buffers smaller than 1/4 of VRAM */
	if (nv_device(drm->device)->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			/* depth buffers in the upper half */
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			/* colour buffers in the lower half */
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}
290 
/* Fill in the bo's TTM placement: @type is the preferred placement
 * mask, @busy adds fallback placements used when the preferred ones
 * are contended.  Pinned buffers get TTM_PL_FLAG_NO_EVICT.
 */
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	/* busy list is a superset: preferred placements plus fallbacks */
	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}
308 
/* Pin a buffer object into the memory type given by @memtype
 * (a TTM_PL_FLAG_* mask).  Pinning is refcounted; re-pinning into a
 * different memory type than the current one fails with -EINVAL.
 * Updates the driver's available VRAM/GART accounting on first pin.
 */
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	/* already pinned somewhere incompatible with the request */
	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		ret = -EINVAL;
		goto out;
	}

	/* nested pin: just bump the refcount */
	if (nvbo->pin_refcnt++)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	/* validate moves the bo into place with NO_EVICT set */
	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available -= bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available -= bo->mem.size;
			break;
		default:
			break;
		}
	}
out:
	ttm_bo_unreserve(bo);
	return ret;
}
349 
/* Drop one pin reference on a buffer object.  When the last reference
 * goes away the NO_EVICT placement is cleared (via re-validate) and
 * the VRAM/GART accounting is restored.  A refcount underflow is
 * flagged with WARN_ON_ONCE but not otherwise recovered.
 */
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	/* still pinned by someone else: nothing more to do */
	if (ref)
		goto out;

	/* keep the current placement, but without NO_EVICT */
	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}
386 
387 int
388 nouveau_bo_map(struct nouveau_bo *nvbo)
389 {
390 	int ret;
391 
392 	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
393 	if (ret)
394 		return ret;
395 
396 	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
397 	ttm_bo_unreserve(&nvbo->bo);
398 	return ret;
399 }
400 
401 void
402 nouveau_bo_unmap(struct nouveau_bo *nvbo)
403 {
404 	if (nvbo)
405 		ttm_bo_kunmap(&nvbo->kmap);
406 }
407 
408 int
409 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
410 		    bool no_wait_gpu)
411 {
412 	int ret;
413 
414 	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
415 			      interruptible, no_wait_gpu);
416 	if (ret)
417 		return ret;
418 
419 	return 0;
420 }
421 
422 u16
423 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
424 {
425 	bool is_iomem;
426 	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
427 	mem = &mem[index];
428 	if (is_iomem)
429 		return ioread16_native((void __force __iomem *)mem);
430 	else
431 		return *mem;
432 }
433 
434 void
435 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
436 {
437 	bool is_iomem;
438 	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
439 	mem = &mem[index];
440 	if (is_iomem)
441 		iowrite16_native(val, (void __force __iomem *)mem);
442 	else
443 		*mem = val;
444 }
445 
446 u32
447 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
448 {
449 	bool is_iomem;
450 	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
451 	mem = &mem[index];
452 	if (is_iomem)
453 		return ioread32_native((void __force __iomem *)mem);
454 	else
455 		return *mem;
456 }
457 
458 void
459 nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
460 {
461 	bool is_iomem;
462 	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
463 	mem = &mem[index];
464 	if (is_iomem)
465 		iowrite32_native(val, (void __force __iomem *)mem);
466 	else
467 		*mem = val;
468 }
469 
/* TTM backend constructor: create the ttm_tt backing for a bo.  Uses
 * the AGP backend when AGP is compiled in and enabled, otherwise the
 * nouveau sgdma backend.
 */
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if __OS_HAS_AGP
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}
486 
/* TTM callback stub: cache invalidation is handled by userspace, so
 * there is nothing to do kernel-side.
 */
static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}
493 
/* TTM callback: describe the capabilities of each memory type (manager
 * implementation, caching modes, mappability).  VRAM and TT use
 * generation-specific managers: custom ones on NV50+, TTM's range
 * manager (or nv04 GART) on older chips.
 */
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (nv_device(drm->device)->card_type >= NV_50) {
			/* NV50+ uses the custom VRAM manager with lazy
			 * io-space reservation */
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (nv_device(drm->device)->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}
547 
548 static void
549 nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
550 {
551 	struct nouveau_bo *nvbo = nouveau_bo(bo);
552 
553 	switch (bo->mem.mem_type) {
554 	case TTM_PL_VRAM:
555 		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
556 					 TTM_PL_FLAG_SYSTEM);
557 		break;
558 	default:
559 		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
560 		break;
561 	}
562 
563 	*pl = nvbo->placement;
564 }
565 
566 
567 /* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
568  * TTM_PL_{VRAM,TT} directly.
569  */
570 
/* Emit a fence after a GPU copy and hand the move completion over to
 * TTM, which waits or pipelines the cleanup of the old memory as
 * appropriate.  Our local fence reference is dropped either way.
 */
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
					no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}
588 
589 static int
590 nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
591 {
592 	int ret = RING_SPACE(chan, 2);
593 	if (ret == 0) {
594 		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
595 		OUT_RING  (chan, handle & 0x0000ffff);
596 		FIRE_RING (chan);
597 	}
598 	return ret;
599 }
600 
/* NVE0 copy engine: linear copy between the two temporary VMAs attached
 * to the old memory node (vma[0] = source, vma[1] = destination).  The
 * whole buffer is submitted in one method batch.
 */
static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, new_mem->num_pages);
		/* launch the copy (inline-data variant of method 0x0300) */
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}
621 
622 static int
623 nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
624 {
625 	int ret = RING_SPACE(chan, 2);
626 	if (ret == 0) {
627 		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
628 		OUT_RING  (chan, handle);
629 	}
630 	return ret;
631 }
632 
633 static int
634 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
635 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
636 {
637 	struct nouveau_mem *node = old_mem->mm_node;
638 	u64 src_offset = node->vma[0].offset;
639 	u64 dst_offset = node->vma[1].offset;
640 	u32 page_count = new_mem->num_pages;
641 	int ret;
642 
643 	page_count = new_mem->num_pages;
644 	while (page_count) {
645 		int line_count = (page_count > 8191) ? 8191 : page_count;
646 
647 		ret = RING_SPACE(chan, 11);
648 		if (ret)
649 			return ret;
650 
651 		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
652 		OUT_RING  (chan, upper_32_bits(src_offset));
653 		OUT_RING  (chan, lower_32_bits(src_offset));
654 		OUT_RING  (chan, upper_32_bits(dst_offset));
655 		OUT_RING  (chan, lower_32_bits(dst_offset));
656 		OUT_RING  (chan, PAGE_SIZE);
657 		OUT_RING  (chan, PAGE_SIZE);
658 		OUT_RING  (chan, PAGE_SIZE);
659 		OUT_RING  (chan, line_count);
660 		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
661 		OUT_RING  (chan, 0x00000110);
662 
663 		page_count -= line_count;
664 		src_offset += (PAGE_SIZE * line_count);
665 		dst_offset += (PAGE_SIZE * line_count);
666 	}
667 
668 	return 0;
669 }
670 
671 static int
672 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
673 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
674 {
675 	struct nouveau_mem *node = old_mem->mm_node;
676 	u64 src_offset = node->vma[0].offset;
677 	u64 dst_offset = node->vma[1].offset;
678 	u32 page_count = new_mem->num_pages;
679 	int ret;
680 
681 	page_count = new_mem->num_pages;
682 	while (page_count) {
683 		int line_count = (page_count > 2047) ? 2047 : page_count;
684 
685 		ret = RING_SPACE(chan, 12);
686 		if (ret)
687 			return ret;
688 
689 		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
690 		OUT_RING  (chan, upper_32_bits(dst_offset));
691 		OUT_RING  (chan, lower_32_bits(dst_offset));
692 		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
693 		OUT_RING  (chan, upper_32_bits(src_offset));
694 		OUT_RING  (chan, lower_32_bits(src_offset));
695 		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
696 		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
697 		OUT_RING  (chan, PAGE_SIZE); /* line_length */
698 		OUT_RING  (chan, line_count);
699 		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
700 		OUT_RING  (chan, 0x00100110);
701 
702 		page_count -= line_count;
703 		src_offset += (PAGE_SIZE * line_count);
704 		dst_offset += (PAGE_SIZE * line_count);
705 	}
706 
707 	return 0;
708 }
709 
710 static int
711 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
712 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
713 {
714 	struct nouveau_mem *node = old_mem->mm_node;
715 	u64 src_offset = node->vma[0].offset;
716 	u64 dst_offset = node->vma[1].offset;
717 	u32 page_count = new_mem->num_pages;
718 	int ret;
719 
720 	page_count = new_mem->num_pages;
721 	while (page_count) {
722 		int line_count = (page_count > 8191) ? 8191 : page_count;
723 
724 		ret = RING_SPACE(chan, 11);
725 		if (ret)
726 			return ret;
727 
728 		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
729 		OUT_RING  (chan, upper_32_bits(src_offset));
730 		OUT_RING  (chan, lower_32_bits(src_offset));
731 		OUT_RING  (chan, upper_32_bits(dst_offset));
732 		OUT_RING  (chan, lower_32_bits(dst_offset));
733 		OUT_RING  (chan, PAGE_SIZE);
734 		OUT_RING  (chan, PAGE_SIZE);
735 		OUT_RING  (chan, PAGE_SIZE);
736 		OUT_RING  (chan, line_count);
737 		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
738 		OUT_RING  (chan, 0x00000110);
739 
740 		page_count -= line_count;
741 		src_offset += (PAGE_SIZE * line_count);
742 		dst_offset += (PAGE_SIZE * line_count);
743 	}
744 
745 	return 0;
746 }
747 
/* NV98 crypt engine (0x88b4) used as a copy engine: single-shot linear
 * copy of the whole buffer between the temporary VMAs.
 */
static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* COPY */);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}
765 
/* NV84 crypt engine (0x74c1) used as a copy engine: single-shot linear
 * copy of the whole buffer between the temporary VMAs.  Note the size
 * is programmed first here, unlike the NV98 variant.
 */
static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}
783 
/* Bind the NV50 M2MF object and point its notifier and both DMA
 * contexts (source and destination) at the framebuffer ctxdma.
 */
static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING  (chan, NvNotify0);
		OUT_RING  (chan, NvDmaFB);
		OUT_RING  (chan, NvDmaFB);
	}

	return ret;
}
799 
/* NV50 M2MF copy.  Works through the buffer in chunks of up to 4 MiB,
 * each transferred as a 2D blit of `height` lines of `stride` bytes.
 * Before each chunk the source and destination surface layouts are
 * programmed: tiled (swizzled) when the side lives in VRAM with a tile
 * layout, linear otherwise.
 */
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		/* at most 4 MiB per chunk */
		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		/* program source surface layout: tiled vs linear */
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		/* program destination surface layout: tiled vs linear */
		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		/* launch the chunk transfer */
		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}
889 
890 static int
891 nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
892 {
893 	int ret = RING_SPACE(chan, 4);
894 	if (ret == 0) {
895 		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
896 		OUT_RING  (chan, handle);
897 		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
898 		OUT_RING  (chan, NvNotify0);
899 	}
900 
901 	return ret;
902 }
903 
904 static inline uint32_t
905 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
906 		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
907 {
908 	if (mem->mem_type == TTM_PL_TT)
909 		return NvDmaTT;
910 	return NvDmaFB;
911 }
912 
913 static int
914 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
915 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
916 {
917 	u32 src_offset = old_mem->start << PAGE_SHIFT;
918 	u32 dst_offset = new_mem->start << PAGE_SHIFT;
919 	u32 page_count = new_mem->num_pages;
920 	int ret;
921 
922 	ret = RING_SPACE(chan, 3);
923 	if (ret)
924 		return ret;
925 
926 	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
927 	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
928 	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
929 
930 	page_count = new_mem->num_pages;
931 	while (page_count) {
932 		int line_count = (page_count > 2047) ? 2047 : page_count;
933 
934 		ret = RING_SPACE(chan, 11);
935 		if (ret)
936 			return ret;
937 
938 		BEGIN_NV04(chan, NvSubCopy,
939 				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
940 		OUT_RING  (chan, src_offset);
941 		OUT_RING  (chan, dst_offset);
942 		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
943 		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
944 		OUT_RING  (chan, PAGE_SIZE); /* line_length */
945 		OUT_RING  (chan, line_count);
946 		OUT_RING  (chan, 0x00000101);
947 		OUT_RING  (chan, 0x00000000);
948 		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
949 		OUT_RING  (chan, 0);
950 
951 		page_count -= line_count;
952 		src_offset += (PAGE_SIZE * line_count);
953 		dst_offset += (PAGE_SIZE * line_count);
954 	}
955 
956 	return 0;
957 }
958 
/* Allocate a GPU virtual address range covering @mem and map the
 * memory into it: direct VRAM map for VRAM, scatter-gather map for
 * everything else.  The vma is filled in for the caller to clean up.
 */
static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
			     PAGE_SHIFT, node->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);

	return 0;
}
979 
980 static int
981 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
982 		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
983 {
984 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
985 	struct nouveau_channel *chan = chan = drm->ttm.chan;
986 	struct nouveau_bo *nvbo = nouveau_bo(bo);
987 	struct ttm_mem_reg *old_mem = &bo->mem;
988 	int ret;
989 
990 	mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
991 
992 	/* create temporary vmas for the transfer and attach them to the
993 	 * old nouveau_mem node, these will get cleaned up after ttm has
994 	 * destroyed the ttm_mem_reg
995 	 */
996 	if (nv_device(drm->device)->card_type >= NV_50) {
997 		struct nouveau_mem *node = old_mem->mm_node;
998 
999 		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
1000 		if (ret)
1001 			goto out;
1002 
1003 		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
1004 		if (ret)
1005 			goto out;
1006 	}
1007 
1008 	ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
1009 	if (ret == 0) {
1010 		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
1011 						    no_wait_gpu, new_mem);
1012 	}
1013 
1014 out:
1015 	mutex_unlock(&chan->cli->mutex);
1016 	return ret;
1017 }
1018 
/* Probe for the best available GPU copy method.  Walks the table in
 * preference order, instantiating the class on the copy-engine channel
 * (engine != 0) or the main channel, and keeps the first one whose
 * init method succeeds.  Falls back to CPU copies when nothing works.
 */
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;	/* non-zero: use the dedicated copy channel */
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		/* entries below the empty sentinel are never reached by
		 * the probe loop — apparently kept as a disabled option;
		 * NOTE(review): confirm this is intentional */
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_object *object;
		struct nouveau_channel *chan;
		u32 handle = (mthd->engine << 16) | mthd->oclass;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
					 mthd->oclass, NULL, 0, &object);
		if (ret == 0) {
			ret = mthd->init(chan, handle);
			if (ret) {
				/* class exists but init failed: clean up
				 * and try the next candidate */
				nouveau_object_del(nv_object(drm),
						   chan->handle, handle);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}
1077 
1078 static int
1079 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1080 		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1081 {
1082 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
1083 	struct ttm_placement placement;
1084 	struct ttm_mem_reg tmp_mem;
1085 	int ret;
1086 
1087 	placement.fpfn = placement.lpfn = 0;
1088 	placement.num_placement = placement.num_busy_placement = 1;
1089 	placement.placement = placement.busy_placement = &placement_memtype;
1090 
1091 	tmp_mem = *new_mem;
1092 	tmp_mem.mm_node = NULL;
1093 	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
1094 	if (ret)
1095 		return ret;
1096 
1097 	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
1098 	if (ret)
1099 		goto out;
1100 
1101 	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
1102 	if (ret)
1103 		goto out;
1104 
1105 	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
1106 out:
1107 	ttm_bo_mem_put(bo, &tmp_mem);
1108 	return ret;
1109 }
1110 
1111 static int
1112 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1113 		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1114 {
1115 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
1116 	struct ttm_placement placement;
1117 	struct ttm_mem_reg tmp_mem;
1118 	int ret;
1119 
1120 	placement.fpfn = placement.lpfn = 0;
1121 	placement.num_placement = placement.num_busy_placement = 1;
1122 	placement.placement = placement.busy_placement = &placement_memtype;
1123 
1124 	tmp_mem = *new_mem;
1125 	tmp_mem.mm_node = NULL;
1126 	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
1127 	if (ret)
1128 		return ret;
1129 
1130 	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
1131 	if (ret)
1132 		goto out;
1133 
1134 	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
1135 	if (ret)
1136 		goto out;
1137 
1138 out:
1139 	ttm_bo_mem_put(bo, &tmp_mem);
1140 	return ret;
1141 }
1142 
1143 static void
1144 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
1145 {
1146 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1147 	struct nouveau_vma *vma;
1148 
1149 	/* ttm can now (stupidly) pass the driver bos it didn't create... */
1150 	if (bo->destroy != nouveau_bo_del_ttm)
1151 		return;
1152 
1153 	list_for_each_entry(vma, &nvbo->vma_list, head) {
1154 		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
1155 			nouveau_vm_map(vma, new_mem->mm_node);
1156 		} else
1157 		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
1158 		    nvbo->page_shift == vma->vm->vmm->spg_shift) {
1159 			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
1160 				nouveau_vm_map_sg_table(vma, 0, new_mem->
1161 						  num_pages << PAGE_SHIFT,
1162 						  new_mem->mm_node);
1163 			else
1164 				nouveau_vm_map_sg(vma, 0, new_mem->
1165 						  num_pages << PAGE_SHIFT,
1166 						  new_mem->mm_node);
1167 		} else {
1168 			nouveau_vm_unmap(vma);
1169 		}
1170 	}
1171 }
1172 
1173 static int
1174 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
1175 		   struct nouveau_drm_tile **new_tile)
1176 {
1177 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1178 	struct drm_device *dev = drm->dev;
1179 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1180 	u64 offset = new_mem->start << PAGE_SHIFT;
1181 
1182 	*new_tile = NULL;
1183 	if (new_mem->mem_type != TTM_PL_VRAM)
1184 		return 0;
1185 
1186 	if (nv_device(drm->device)->card_type >= NV_10) {
1187 		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
1188 						nvbo->tile_mode,
1189 						nvbo->tile_flags);
1190 	}
1191 
1192 	return 0;
1193 }
1194 
1195 static void
1196 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1197 		      struct nouveau_drm_tile *new_tile,
1198 		      struct nouveau_drm_tile **old_tile)
1199 {
1200 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1201 	struct drm_device *dev = drm->dev;
1202 
1203 	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
1204 	*old_tile = new_tile;
1205 }
1206 
/* ttm_bo_driver.move hook: migrate a buffer object to new_mem.
 *
 * Order of preference: zero-copy "fake" move for an unpopulated system
 * bo, then the probed hardware copy path (flipd/flips/m2mf depending on
 * source/destination placement), then a CPU memcpy fallback.  On
 * pre-NV50 chipsets a tiling region is reserved up front and either
 * committed (success) or dropped (failure) at the end.
 */
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	/* pre-NV50: reserve a tile region for the destination up front */
	if (nv_device(drm->device)->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		/* nothing is backed yet, so just adopt the new placement */
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* CPU copy if we have no accelerated method available */
	if (!drm->ttm.move) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr,
					    no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr,
					    no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr,
					   no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);

out:
	/* pre-NV50: commit the new tile on success, drop it on failure */
	if (nv_device(drm->device)->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}
1264 
1265 static int
1266 nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1267 {
1268 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1269 
1270 	return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp);
1271 }
1272 
/* ttm_bo_driver.io_mem_reserve hook: fill in mem->bus so the region can
 * be CPU-mapped.  On NV50+ this also creates a BAR mapping for VRAM
 * (torn down again in nouveau_ttm_io_mem_free()).
 */
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;
	int ret;

	/* start from a clean, non-iomem description */
	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			/* AGP aperture; iomem unless the bridge can't use it */
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = !dev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* VRAM is exposed through PCI BAR 1 */
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		if (nv_device(drm->device)->card_type >= NV_50) {
			struct nouveau_bar *bar = nouveau_bar(drm->device);
			struct nouveau_mem *node = mem->mm_node;

			/* map this allocation through the BAR and use the
			 * resulting vma offset instead */
			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
					&node->bar_vma);
			if (ret)
				return ret;

			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
1322 
1323 static void
1324 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1325 {
1326 	struct nouveau_drm *drm = nouveau_bdev(bdev);
1327 	struct nouveau_bar *bar = nouveau_bar(drm->device);
1328 	struct nouveau_mem *node = mem->mm_node;
1329 
1330 	if (!node->bar_vma.node)
1331 		return;
1332 
1333 	bar->unmap(bar, &node->bar_vma);
1334 }
1335 
1336 static int
1337 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1338 {
1339 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1340 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1341 	struct nouveau_device *device = nv_device(drm->device);
1342 	u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;
1343 
1344 	/* as long as the bo isn't in vram, and isn't tiled, we've got
1345 	 * nothing to do here.
1346 	 */
1347 	if (bo->mem.mem_type != TTM_PL_VRAM) {
1348 		if (nv_device(drm->device)->card_type < NV_50 ||
1349 		    !nouveau_bo_tile_layout(nvbo))
1350 			return 0;
1351 	}
1352 
1353 	/* make sure bo is in mappable vram */
1354 	if (bo->mem.start + bo->mem.num_pages < mappable)
1355 		return 0;
1356 
1357 
1358 	nvbo->placement.fpfn = 0;
1359 	nvbo->placement.lpfn = mappable;
1360 	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1361 	return nouveau_bo_validate(nvbo, false, false);
1362 }
1363 
1364 static int
1365 nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1366 {
1367 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
1368 	struct nouveau_drm *drm;
1369 	struct drm_device *dev;
1370 	unsigned i;
1371 	int r;
1372 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1373 
1374 	if (ttm->state != tt_unpopulated)
1375 		return 0;
1376 
1377 	if (slave && ttm->sg) {
1378 		/* make userspace faulting work */
1379 		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1380 						 ttm_dma->dma_address, ttm->num_pages);
1381 		ttm->state = tt_unbound;
1382 		return 0;
1383 	}
1384 
1385 	drm = nouveau_bdev(ttm->bdev);
1386 	dev = drm->dev;
1387 
1388 #if __OS_HAS_AGP
1389 	if (drm->agp.stat == ENABLED) {
1390 		return ttm_agp_tt_populate(ttm);
1391 	}
1392 #endif
1393 
1394 #ifdef CONFIG_SWIOTLB
1395 	if (swiotlb_nr_tbl()) {
1396 		return ttm_dma_populate((void *)ttm, dev->dev);
1397 	}
1398 #endif
1399 
1400 	r = ttm_pool_populate(ttm);
1401 	if (r) {
1402 		return r;
1403 	}
1404 
1405 	for (i = 0; i < ttm->num_pages; i++) {
1406 		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
1407 						   0, PAGE_SIZE,
1408 						   PCI_DMA_BIDIRECTIONAL);
1409 		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
1410 			while (--i) {
1411 				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
1412 					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
1413 				ttm_dma->dma_address[i] = 0;
1414 			}
1415 			ttm_pool_unpopulate(ttm);
1416 			return -EFAULT;
1417 		}
1418 	}
1419 	return 0;
1420 }
1421 
1422 static void
1423 nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1424 {
1425 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
1426 	struct nouveau_drm *drm;
1427 	struct drm_device *dev;
1428 	unsigned i;
1429 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1430 
1431 	if (slave)
1432 		return;
1433 
1434 	drm = nouveau_bdev(ttm->bdev);
1435 	dev = drm->dev;
1436 
1437 #if __OS_HAS_AGP
1438 	if (drm->agp.stat == ENABLED) {
1439 		ttm_agp_tt_unpopulate(ttm);
1440 		return;
1441 	}
1442 #endif
1443 
1444 #ifdef CONFIG_SWIOTLB
1445 	if (swiotlb_nr_tbl()) {
1446 		ttm_dma_unpopulate((void *)ttm, dev->dev);
1447 		return;
1448 	}
1449 #endif
1450 
1451 	for (i = 0; i < ttm->num_pages; i++) {
1452 		if (ttm_dma->dma_address[i]) {
1453 			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
1454 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
1455 		}
1456 	}
1457 
1458 	ttm_pool_unpopulate(ttm);
1459 }
1460 
1461 void
1462 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
1463 {
1464 	struct nouveau_fence *old_fence = NULL;
1465 
1466 	if (likely(fence))
1467 		nouveau_fence_ref(fence);
1468 
1469 	spin_lock(&nvbo->bo.bdev->fence_lock);
1470 	old_fence = nvbo->bo.sync_obj;
1471 	nvbo->bo.sync_obj = fence;
1472 	spin_unlock(&nvbo->bo.bdev->fence_lock);
1473 
1474 	nouveau_fence_unref(&old_fence);
1475 }
1476 
/* ttm sync_obj_unref hook: void** -> nouveau_fence** adapter */
static void
nouveau_bo_fence_unref(void **sync_obj)
{
	struct nouveau_fence **fence = (struct nouveau_fence **)sync_obj;

	nouveau_fence_unref(fence);
}
1482 
/* ttm sync_obj_ref hook: take a reference on the fence */
static void *
nouveau_bo_fence_ref(void *sync_obj)
{
	struct nouveau_fence *fence = sync_obj;

	return nouveau_fence_ref(fence);
}
1488 
1489 static bool
1490 nouveau_bo_fence_signalled(void *sync_obj)
1491 {
1492 	return nouveau_fence_done(sync_obj);
1493 }
1494 
1495 static int
1496 nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
1497 {
1498 	return nouveau_fence_wait(sync_obj, lazy, intr);
1499 }
1500 
/* ttm sync_obj_flush hook: nothing to flush for nouveau fences */
static int
nouveau_bo_fence_flush(void *sync_obj)
{
	return 0;
}
1506 
1507 struct ttm_bo_driver nouveau_bo_driver = {
1508 	.ttm_tt_create = &nouveau_ttm_tt_create,
1509 	.ttm_tt_populate = &nouveau_ttm_tt_populate,
1510 	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
1511 	.invalidate_caches = nouveau_bo_invalidate_caches,
1512 	.init_mem_type = nouveau_bo_init_mem_type,
1513 	.evict_flags = nouveau_bo_evict_flags,
1514 	.move_notify = nouveau_bo_move_ntfy,
1515 	.move = nouveau_bo_move,
1516 	.verify_access = nouveau_bo_verify_access,
1517 	.sync_obj_signaled = nouveau_bo_fence_signalled,
1518 	.sync_obj_wait = nouveau_bo_fence_wait,
1519 	.sync_obj_flush = nouveau_bo_fence_flush,
1520 	.sync_obj_unref = nouveau_bo_fence_unref,
1521 	.sync_obj_ref = nouveau_bo_fence_ref,
1522 	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
1523 	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
1524 	.io_mem_free = &nouveau_ttm_io_mem_free,
1525 };
1526 
1527 struct nouveau_vma *
1528 nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
1529 {
1530 	struct nouveau_vma *vma;
1531 	list_for_each_entry(vma, &nvbo->vma_list, head) {
1532 		if (vma->vm == vm)
1533 			return vma;
1534 	}
1535 
1536 	return NULL;
1537 }
1538 
1539 int
1540 nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
1541 		   struct nouveau_vma *vma)
1542 {
1543 	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
1544 	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
1545 	int ret;
1546 
1547 	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
1548 			     NV_MEM_ACCESS_RW, vma);
1549 	if (ret)
1550 		return ret;
1551 
1552 	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
1553 		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
1554 	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
1555 		if (node->sg)
1556 			nouveau_vm_map_sg_table(vma, 0, size, node);
1557 		else
1558 			nouveau_vm_map_sg(vma, 0, size, node);
1559 	}
1560 
1561 	list_add_tail(&vma->head, &nvbo->vma_list);
1562 	vma->refcount = 1;
1563 	return 0;
1564 }
1565 
1566 void
1567 nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
1568 {
1569 	if (vma->node) {
1570 		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
1571 			nouveau_vm_unmap(vma);
1572 		nouveau_vm_put(vma);
1573 		list_del(&vma->head);
1574 	}
1575 }
1576