1 /*
2  * Copyright 2007 Dave Airlied
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  */
24 /*
25  * Authors: Dave Airlied <airlied@linux.ie>
26  *	    Ben Skeggs   <darktama@iinet.net.au>
27  *	    Jeremy Kolb  <jkolb@brandeis.edu>
28  */
29 
30 #include "drmP.h"
31 #include "ttm/ttm_page_alloc.h"
32 
33 #include "nouveau_drm.h"
34 #include "nouveau_drv.h"
35 #include "nouveau_dma.h"
36 #include "nouveau_mm.h"
37 #include "nouveau_vm.h"
38 #include "nouveau_fence.h"
39 #include "nouveau_ramht.h"
40 
41 #include <linux/log2.h>
42 #include <linux/slab.h>
43 
44 static void
45 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
46 {
47 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
48 	struct drm_device *dev = dev_priv->dev;
49 	struct nouveau_bo *nvbo = nouveau_bo(bo);
50 
51 	if (unlikely(nvbo->gem))
52 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
53 
54 	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
55 	kfree(nvbo);
56 }
57 
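/* Grow the requested size and alignment to something the allocator can
 * satisfy.  On pre-NV50 chipsets a tiled bo is padded to a multiple of
 * the tile row (64, or 32 on NV1x, times tile_mode) and aligned to a
 * chipset-dependent boundary; on NV50 and later both are simply rounded
 * up to the bo's page size.  The size is always padded to PAGE_SIZE.
 */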
58 static void
59 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
60 		       int *align, int *size)
61 {
62 	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
63 
64 	if (dev_priv->card_type < NV_50) {
65 		if (nvbo->tile_mode) {
66 			if (dev_priv->chipset >= 0x40) {
67 				*align = 65536;
68 				*size = roundup(*size, 64 * nvbo->tile_mode);
69 
70 			} else if (dev_priv->chipset >= 0x30) {
71 				*align = 32768;
72 				*size = roundup(*size, 64 * nvbo->tile_mode);
73 
74 			} else if (dev_priv->chipset >= 0x20) {
75 				*align = 16384;
76 				*size = roundup(*size, 64 * nvbo->tile_mode);
77 
78 			} else if (dev_priv->chipset >= 0x10) {
79 				*align = 16384;
80 				*size = roundup(*size, 32 * nvbo->tile_mode);
81 			}
82 		}
83 	} else {
84 		*size = roundup(*size, (1 << nvbo->page_shift));
85 		*align = max((1 <<  nvbo->page_shift), *align);
86 	}
87 
88 	*size = roundup(*size, PAGE_SIZE);
89 }
90 
91 int
92 nouveau_bo_new(struct drm_device *dev, int size, int align,
93 	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
94 	       struct sg_table *sg,
95 	       struct nouveau_bo **pnvbo)
96 {
97 	struct drm_nouveau_private *dev_priv = dev->dev_private;
98 	struct nouveau_bo *nvbo;
99 	size_t acc_size;
100 	int ret;
101 	int type = ttm_bo_type_device;
102 
103 	if (sg)
104 		type = ttm_bo_type_sg;
105 
106 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
107 	if (!nvbo)
108 		return -ENOMEM;
109 	INIT_LIST_HEAD(&nvbo->head);
110 	INIT_LIST_HEAD(&nvbo->entry);
111 	INIT_LIST_HEAD(&nvbo->vma_list);
112 	nvbo->tile_mode = tile_mode;
113 	nvbo->tile_flags = tile_flags;
114 	nvbo->bo.bdev = &dev_priv->ttm.bdev;
115 
116 	nvbo->page_shift = 12;
117 	if (dev_priv->bar1_vm) {
118 		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
119 			nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
120 	}
121 
122 	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
123 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
124 	nouveau_bo_placement_set(nvbo, flags, 0);
125 
126 	acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
127 				       sizeof(struct nouveau_bo));
128 
129 	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
130 			  type, &nvbo->placement,
131 			  align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
132 			  nouveau_bo_del_ttm);
133 	if (ret) {
134 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
135 		return ret;
136 	}
137 
138 	*pnvbo = nvbo;
139 	return 0;
140 }
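/* Illustrative sketch only, not a call site in this file: a typical user
 * of the allocation path above and the pin/map helpers below creates a
 * bo, pins it into the wanted domain and maps it.  Sizes are made up and
 * error handling is elided:
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret = nouveau_bo_new(dev, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *				 0, 0, NULL, &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	if (ret == 0)
 *		ret = nouveau_bo_map(nvbo);
 */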
141 
142 static void
143 set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
144 {
145 	*n = 0;
146 
147 	if (type & TTM_PL_FLAG_VRAM)
148 		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
149 	if (type & TTM_PL_FLAG_TT)
150 		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
151 	if (type & TTM_PL_FLAG_SYSTEM)
152 		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
153 }
154 
155 static void
156 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
157 {
158 	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
159 	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
160 
161 	if (dev_priv->card_type == NV_10 &&
162 	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
163 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
164 		/*
165 		 * Make sure that the color and depth buffers are handled
166 		 * by independent memory controller units. Up to a 9x
167 		 * speed up when alpha-blending and depth-test are enabled
168 		 * at the same time.
169 		 */
170 		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
171 			nvbo->placement.fpfn = vram_pages / 2;
172 			nvbo->placement.lpfn = ~0;
173 		} else {
174 			nvbo->placement.fpfn = 0;
175 			nvbo->placement.lpfn = vram_pages / 2;
176 		}
177 	}
178 }
179 
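/* Build the TTM placement lists for a bo.  The primary list holds only
 * the requested domains; the "busy" argument widens the fallback list
 * that TTM may use when the preferred placement cannot be satisfied
 * under memory pressure.
 */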
180 void
181 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
182 {
183 	struct ttm_placement *pl = &nvbo->placement;
184 	uint32_t flags = TTM_PL_MASK_CACHING |
185 		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
186 
187 	pl->placement = nvbo->placements;
188 	set_placement_list(nvbo->placements, &pl->num_placement,
189 			   type, flags);
190 
191 	pl->busy_placement = nvbo->busy_placements;
192 	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
193 			   type | busy, flags);
194 
195 	set_placement_range(nvbo, type);
196 }
197 
198 int
199 nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
200 {
201 	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
202 	struct ttm_buffer_object *bo = &nvbo->bo;
203 	int ret;
204 
205 	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
206 		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
207 			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
208 			 1 << bo->mem.mem_type, memtype);
209 		return -EINVAL;
210 	}
211 
212 	if (nvbo->pin_refcnt++)
213 		return 0;
214 
215 	ret = ttm_bo_reserve(bo, false, false, false, 0);
216 	if (ret)
217 		goto out;
218 
219 	nouveau_bo_placement_set(nvbo, memtype, 0);
220 
221 	ret = nouveau_bo_validate(nvbo, false, false, false);
222 	if (ret == 0) {
223 		switch (bo->mem.mem_type) {
224 		case TTM_PL_VRAM:
225 			dev_priv->fb_aper_free -= bo->mem.size;
226 			break;
227 		case TTM_PL_TT:
228 			dev_priv->gart_info.aper_free -= bo->mem.size;
229 			break;
230 		default:
231 			break;
232 		}
233 	}
234 	ttm_bo_unreserve(bo);
235 out:
236 	if (unlikely(ret))
237 		nvbo->pin_refcnt--;
238 	return ret;
239 }
240 
241 int
242 nouveau_bo_unpin(struct nouveau_bo *nvbo)
243 {
244 	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
245 	struct ttm_buffer_object *bo = &nvbo->bo;
246 	int ret;
247 
248 	if (--nvbo->pin_refcnt)
249 		return 0;
250 
251 	ret = ttm_bo_reserve(bo, false, false, false, 0);
252 	if (ret)
253 		return ret;
254 
255 	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
256 
257 	ret = nouveau_bo_validate(nvbo, false, false, false);
258 	if (ret == 0) {
259 		switch (bo->mem.mem_type) {
260 		case TTM_PL_VRAM:
261 			dev_priv->fb_aper_free += bo->mem.size;
262 			break;
263 		case TTM_PL_TT:
264 			dev_priv->gart_info.aper_free += bo->mem.size;
265 			break;
266 		default:
267 			break;
268 		}
269 	}
270 
271 	ttm_bo_unreserve(bo);
272 	return ret;
273 }
274 
275 int
276 nouveau_bo_map(struct nouveau_bo *nvbo)
277 {
278 	int ret;
279 
280 	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
281 	if (ret)
282 		return ret;
283 
284 	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
285 	ttm_bo_unreserve(&nvbo->bo);
286 	return ret;
287 }
288 
289 void
290 nouveau_bo_unmap(struct nouveau_bo *nvbo)
291 {
292 	if (nvbo)
293 		ttm_bo_kunmap(&nvbo->kmap);
294 }
295 
296 int
297 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
298 		    bool no_wait_reserve, bool no_wait_gpu)
299 {
300 	int ret;
301 
302 	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
303 			      no_wait_reserve, no_wait_gpu);
304 	if (ret)
305 		return ret;
306 
307 	return 0;
308 }
309 
310 u16
311 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
312 {
313 	bool is_iomem;
314 	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
315 	mem = &mem[index];
316 	if (is_iomem)
317 		return ioread16_native((void __force __iomem *)mem);
318 	else
319 		return *mem;
320 }
321 
322 void
323 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
324 {
325 	bool is_iomem;
326 	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
327 	mem = &mem[index];
328 	if (is_iomem)
329 		iowrite16_native(val, (void __force __iomem *)mem);
330 	else
331 		*mem = val;
332 }
333 
334 u32
335 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
336 {
337 	bool is_iomem;
338 	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
339 	mem = &mem[index];
340 	if (is_iomem)
341 		return ioread32_native((void __force __iomem *)mem);
342 	else
343 		return *mem;
344 }
345 
346 void
347 nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
348 {
349 	bool is_iomem;
350 	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
351 	mem = &mem[index];
352 	if (is_iomem)
353 		iowrite32_native(val, (void __force __iomem *)mem);
354 	else
355 		*mem = val;
356 }
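/* Illustrative sketch only, not a call site in this file: the accessors
 * above hide whether the kmap landed in iomem or in system memory, so a
 * mapped bo can be poked uniformly:
 *
 *	if (nouveau_bo_map(nvbo) == 0) {
 *		nouveau_bo_wr32(nvbo, 0, 0x12345678);
 *		val = nouveau_bo_rd32(nvbo, 0);
 *		nouveau_bo_unmap(nvbo);
 *	}
 */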
357 
358 static struct ttm_tt *
359 nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
360 		      unsigned long size, uint32_t page_flags,
361 		      struct page *dummy_read_page)
362 {
363 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
364 	struct drm_device *dev = dev_priv->dev;
365 
366 	switch (dev_priv->gart_info.type) {
367 #if __OS_HAS_AGP
368 	case NOUVEAU_GART_AGP:
369 		return ttm_agp_tt_create(bdev, dev->agp->bridge,
370 					 size, page_flags, dummy_read_page);
371 #endif
372 	case NOUVEAU_GART_PDMA:
373 	case NOUVEAU_GART_HW:
374 		return nouveau_sgdma_create_ttm(bdev, size, page_flags,
375 						dummy_read_page);
376 	default:
377 		NV_ERROR(dev, "Unknown GART type %d\n",
378 			 dev_priv->gart_info.type);
379 		break;
380 	}
381 
382 	return NULL;
383 }
384 
385 static int
386 nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
387 {
388 	/* We'll do this from user space. */
389 	return 0;
390 }
391 
392 static int
393 nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
394 			 struct ttm_mem_type_manager *man)
395 {
396 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
397 	struct drm_device *dev = dev_priv->dev;
398 
399 	switch (type) {
400 	case TTM_PL_SYSTEM:
401 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
402 		man->available_caching = TTM_PL_MASK_CACHING;
403 		man->default_caching = TTM_PL_FLAG_CACHED;
404 		break;
405 	case TTM_PL_VRAM:
406 		if (dev_priv->card_type >= NV_50) {
407 			man->func = &nouveau_vram_manager;
408 			man->io_reserve_fastpath = false;
409 			man->use_io_reserve_lru = true;
410 		} else {
411 			man->func = &ttm_bo_manager_func;
412 		}
413 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
414 			     TTM_MEMTYPE_FLAG_MAPPABLE;
415 		man->available_caching = TTM_PL_FLAG_UNCACHED |
416 					 TTM_PL_FLAG_WC;
417 		man->default_caching = TTM_PL_FLAG_WC;
418 		break;
419 	case TTM_PL_TT:
420 		if (dev_priv->card_type >= NV_50)
421 			man->func = &nouveau_gart_manager;
422 		else
423 			man->func = &ttm_bo_manager_func;
424 		switch (dev_priv->gart_info.type) {
425 		case NOUVEAU_GART_AGP:
426 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
427 			man->available_caching = TTM_PL_FLAG_UNCACHED |
428 				TTM_PL_FLAG_WC;
429 			man->default_caching = TTM_PL_FLAG_WC;
430 			break;
431 		case NOUVEAU_GART_PDMA:
432 		case NOUVEAU_GART_HW:
433 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
434 				     TTM_MEMTYPE_FLAG_CMA;
435 			man->available_caching = TTM_PL_MASK_CACHING;
436 			man->default_caching = TTM_PL_FLAG_CACHED;
437 			break;
438 		default:
439 			NV_ERROR(dev, "Unknown GART type: %d\n",
440 				 dev_priv->gart_info.type);
441 			return -EINVAL;
442 		}
443 		break;
444 	default:
445 		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
446 		return -EINVAL;
447 	}
448 	return 0;
449 }
450 
451 static void
452 nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
453 {
454 	struct nouveau_bo *nvbo = nouveau_bo(bo);
455 
456 	switch (bo->mem.mem_type) {
457 	case TTM_PL_VRAM:
458 		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
459 					 TTM_PL_FLAG_SYSTEM);
460 		break;
461 	default:
462 		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
463 		break;
464 	}
465 
466 	*pl = nvbo->placement;
467 }
468 
470 /* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
471  * TTM_PL_{VRAM,TT} directly.
472  */
473 
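/* Emit a fence on the channel once a copy has been queued and hand it to
 * TTM, so the old backing store is only torn down after the GPU has
 * actually finished the transfer.
 */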
474 static int
475 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
476 			      struct nouveau_bo *nvbo, bool evict,
477 			      bool no_wait_reserve, bool no_wait_gpu,
478 			      struct ttm_mem_reg *new_mem)
479 {
480 	struct nouveau_fence *fence = NULL;
481 	int ret;
482 
483 	ret = nouveau_fence_new(chan, &fence);
484 	if (ret)
485 		return ret;
486 
487 	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
488 					no_wait_reserve, no_wait_gpu, new_mem);
489 	nouveau_fence_unref(&fence);
490 	return ret;
491 }
492 
493 static int
494 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
495 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
496 {
497 	struct nouveau_mem *node = old_mem->mm_node;
498 	int ret = RING_SPACE(chan, 10);
499 	if (ret == 0) {
500 		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
501 		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
502 		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
503 		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
504 		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
505 		OUT_RING  (chan, PAGE_SIZE);
506 		OUT_RING  (chan, PAGE_SIZE);
507 		OUT_RING  (chan, PAGE_SIZE);
508 		OUT_RING  (chan, new_mem->num_pages);
509 		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
510 	}
511 	return ret;
512 }
513 
514 static int
515 nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
516 {
517 	int ret = RING_SPACE(chan, 2);
518 	if (ret == 0) {
519 		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
520 		OUT_RING  (chan, handle);
521 	}
522 	return ret;
523 }
524 
525 static int
526 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
527 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
528 {
529 	struct nouveau_mem *node = old_mem->mm_node;
530 	u64 src_offset = node->vma[0].offset;
531 	u64 dst_offset = node->vma[1].offset;
532 	u32 page_count = new_mem->num_pages;
533 	int ret;
534 
536 	while (page_count) {
537 		int line_count = (page_count > 8191) ? 8191 : page_count;
538 
539 		ret = RING_SPACE(chan, 11);
540 		if (ret)
541 			return ret;
542 
543 		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
544 		OUT_RING  (chan, upper_32_bits(src_offset));
545 		OUT_RING  (chan, lower_32_bits(src_offset));
546 		OUT_RING  (chan, upper_32_bits(dst_offset));
547 		OUT_RING  (chan, lower_32_bits(dst_offset));
548 		OUT_RING  (chan, PAGE_SIZE);
549 		OUT_RING  (chan, PAGE_SIZE);
550 		OUT_RING  (chan, PAGE_SIZE);
551 		OUT_RING  (chan, line_count);
552 		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
553 		OUT_RING  (chan, 0x00000110);
554 
555 		page_count -= line_count;
556 		src_offset += (PAGE_SIZE * line_count);
557 		dst_offset += (PAGE_SIZE * line_count);
558 	}
559 
560 	return 0;
561 }
562 
563 static int
564 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
565 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
566 {
567 	struct nouveau_mem *node = old_mem->mm_node;
568 	u64 src_offset = node->vma[0].offset;
569 	u64 dst_offset = node->vma[1].offset;
570 	u32 page_count = new_mem->num_pages;
571 	int ret;
572 
574 	while (page_count) {
575 		int line_count = (page_count > 2047) ? 2047 : page_count;
576 
577 		ret = RING_SPACE(chan, 12);
578 		if (ret)
579 			return ret;
580 
581 		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
582 		OUT_RING  (chan, upper_32_bits(dst_offset));
583 		OUT_RING  (chan, lower_32_bits(dst_offset));
584 		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
585 		OUT_RING  (chan, upper_32_bits(src_offset));
586 		OUT_RING  (chan, lower_32_bits(src_offset));
587 		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
588 		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
589 		OUT_RING  (chan, PAGE_SIZE); /* line_length */
590 		OUT_RING  (chan, line_count);
591 		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
592 		OUT_RING  (chan, 0x00100110);
593 
594 		page_count -= line_count;
595 		src_offset += (PAGE_SIZE * line_count);
596 		dst_offset += (PAGE_SIZE * line_count);
597 	}
598 
599 	return 0;
600 }
601 
602 static int
603 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
604 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
605 {
606 	struct nouveau_mem *node = old_mem->mm_node;
607 	u64 src_offset = node->vma[0].offset;
608 	u64 dst_offset = node->vma[1].offset;
609 	u32 page_count = new_mem->num_pages;
610 	int ret;
611 
613 	while (page_count) {
614 		int line_count = (page_count > 8191) ? 8191 : page_count;
615 
616 		ret = RING_SPACE(chan, 11);
617 		if (ret)
618 			return ret;
619 
620 		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
621 		OUT_RING  (chan, upper_32_bits(src_offset));
622 		OUT_RING  (chan, lower_32_bits(src_offset));
623 		OUT_RING  (chan, upper_32_bits(dst_offset));
624 		OUT_RING  (chan, lower_32_bits(dst_offset));
625 		OUT_RING  (chan, PAGE_SIZE);
626 		OUT_RING  (chan, PAGE_SIZE);
627 		OUT_RING  (chan, PAGE_SIZE);
628 		OUT_RING  (chan, line_count);
629 		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
630 		OUT_RING  (chan, 0x00000110);
631 
632 		page_count -= line_count;
633 		src_offset += (PAGE_SIZE * line_count);
634 		dst_offset += (PAGE_SIZE * line_count);
635 	}
636 
637 	return 0;
638 }
639 
640 static int
641 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
642 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
643 {
644 	struct nouveau_mem *node = old_mem->mm_node;
645 	int ret = RING_SPACE(chan, 7);
646 	if (ret == 0) {
647 		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
648 		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
649 		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
650 		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
651 		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
652 		OUT_RING  (chan, 0x00000000 /* COPY */);
653 		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
654 	}
655 	return ret;
656 }
657 
658 static int
659 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
660 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
661 {
662 	struct nouveau_mem *node = old_mem->mm_node;
663 	int ret = RING_SPACE(chan, 7);
664 	if (ret == 0) {
665 		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
666 		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
667 		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
668 		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
669 		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
670 		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
671 		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
672 	}
673 	return ret;
674 }
675 
676 static int
677 nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
678 {
679 	int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
680 					 &chan->m2mf_ntfy);
681 	if (ret == 0) {
682 		ret = RING_SPACE(chan, 6);
683 		if (ret == 0) {
684 			BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
685 			OUT_RING  (chan, handle);
686 			BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
687 			OUT_RING  (chan, NvNotify0);
688 			OUT_RING  (chan, NvDmaFB);
689 			OUT_RING  (chan, NvDmaFB);
690 		} else {
691 			nouveau_ramht_remove(chan, NvNotify0);
692 		}
693 	}
694 
695 	return ret;
696 }
697 
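/* NV50 M2MF copy: the transfer is split into chunks of at most 4MiB,
 * expressed as "height" lines with a 64-byte stride, and each side is
 * programmed as linear or tiled depending on whether the bo carries a
 * tile layout and the memory involved is VRAM.
 */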
698 static int
699 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
700 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
701 {
702 	struct nouveau_mem *node = old_mem->mm_node;
703 	struct nouveau_bo *nvbo = nouveau_bo(bo);
704 	u64 length = (new_mem->num_pages << PAGE_SHIFT);
705 	u64 src_offset = node->vma[0].offset;
706 	u64 dst_offset = node->vma[1].offset;
707 	int ret;
708 
709 	while (length) {
710 		u32 amount, stride, height;
711 
712 		amount  = min(length, (u64)(4 * 1024 * 1024));
713 		stride  = 16 * 4;
714 		height  = amount / stride;
715 
716 		if (new_mem->mem_type == TTM_PL_VRAM &&
717 		    nouveau_bo_tile_layout(nvbo)) {
718 			ret = RING_SPACE(chan, 8);
719 			if (ret)
720 				return ret;
721 
722 			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
723 			OUT_RING  (chan, 0);
724 			OUT_RING  (chan, 0);
725 			OUT_RING  (chan, stride);
726 			OUT_RING  (chan, height);
727 			OUT_RING  (chan, 1);
728 			OUT_RING  (chan, 0);
729 			OUT_RING  (chan, 0);
730 		} else {
731 			ret = RING_SPACE(chan, 2);
732 			if (ret)
733 				return ret;
734 
735 			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
736 			OUT_RING  (chan, 1);
737 		}
738 		if (old_mem->mem_type == TTM_PL_VRAM &&
739 		    nouveau_bo_tile_layout(nvbo)) {
740 			ret = RING_SPACE(chan, 8);
741 			if (ret)
742 				return ret;
743 
744 			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
745 			OUT_RING  (chan, 0);
746 			OUT_RING  (chan, 0);
747 			OUT_RING  (chan, stride);
748 			OUT_RING  (chan, height);
749 			OUT_RING  (chan, 1);
750 			OUT_RING  (chan, 0);
751 			OUT_RING  (chan, 0);
752 		} else {
753 			ret = RING_SPACE(chan, 2);
754 			if (ret)
755 				return ret;
756 
757 			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
758 			OUT_RING  (chan, 1);
759 		}
760 
761 		ret = RING_SPACE(chan, 14);
762 		if (ret)
763 			return ret;
764 
765 		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
766 		OUT_RING  (chan, upper_32_bits(src_offset));
767 		OUT_RING  (chan, upper_32_bits(dst_offset));
768 		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
769 		OUT_RING  (chan, lower_32_bits(src_offset));
770 		OUT_RING  (chan, lower_32_bits(dst_offset));
771 		OUT_RING  (chan, stride);
772 		OUT_RING  (chan, stride);
773 		OUT_RING  (chan, stride);
774 		OUT_RING  (chan, height);
775 		OUT_RING  (chan, 0x00000101);
776 		OUT_RING  (chan, 0x00000000);
777 		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
778 		OUT_RING  (chan, 0);
779 
780 		length -= amount;
781 		src_offset += amount;
782 		dst_offset += amount;
783 	}
784 
785 	return 0;
786 }
787 
788 static int
789 nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
790 {
791 	int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
792 					 &chan->m2mf_ntfy);
793 	if (ret == 0) {
794 		ret = RING_SPACE(chan, 4);
795 		if (ret == 0) {
796 			BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
797 			OUT_RING  (chan, handle);
798 			BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
799 			OUT_RING  (chan, NvNotify0);
800 		}
801 	}
802 
803 	return ret;
804 }
805 
806 static inline uint32_t
807 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
808 		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
809 {
810 	if (mem->mem_type == TTM_PL_TT)
811 		return chan->gart_handle;
812 	return chan->vram_handle;
813 }
814 
815 static int
816 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
817 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
818 {
819 	u32 src_offset = old_mem->start << PAGE_SHIFT;
820 	u32 dst_offset = new_mem->start << PAGE_SHIFT;
821 	u32 page_count = new_mem->num_pages;
822 	int ret;
823 
824 	ret = RING_SPACE(chan, 3);
825 	if (ret)
826 		return ret;
827 
828 	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
829 	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
830 	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
831 
833 	while (page_count) {
834 		int line_count = (page_count > 2047) ? 2047 : page_count;
835 
836 		ret = RING_SPACE(chan, 11);
837 		if (ret)
838 			return ret;
839 
840 		BEGIN_NV04(chan, NvSubCopy,
841 				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
842 		OUT_RING  (chan, src_offset);
843 		OUT_RING  (chan, dst_offset);
844 		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
845 		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
846 		OUT_RING  (chan, PAGE_SIZE); /* line_length */
847 		OUT_RING  (chan, line_count);
848 		OUT_RING  (chan, 0x00000101);
849 		OUT_RING  (chan, 0x00000000);
850 		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
851 		OUT_RING  (chan, 0);
852 
853 		page_count -= line_count;
854 		src_offset += (PAGE_SIZE * line_count);
855 		dst_offset += (PAGE_SIZE * line_count);
856 	}
857 
858 	return 0;
859 }
860 
861 static int
862 nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
863 		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
864 {
865 	struct nouveau_mem *node = mem->mm_node;
866 	int ret;
867 
868 	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
869 			     node->page_shift, NV_MEM_ACCESS_RO, vma);
870 	if (ret)
871 		return ret;
872 
873 	if (mem->mem_type == TTM_PL_VRAM)
874 		nouveau_vm_map(vma, node);
875 	else
876 		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
877 
878 	return 0;
879 }
880 
881 static int
882 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
883 		     bool no_wait_reserve, bool no_wait_gpu,
884 		     struct ttm_mem_reg *new_mem)
885 {
886 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = dev_priv->channel;
888 	struct nouveau_bo *nvbo = nouveau_bo(bo);
889 	struct ttm_mem_reg *old_mem = &bo->mem;
890 	int ret;
891 
892 	mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
893 
894 	/* create temporary vmas for the transfer and attach them to the
895 	 * old nouveau_mem node, these will get cleaned up after ttm has
896 	 * destroyed the ttm_mem_reg
897 	 */
898 	if (dev_priv->card_type >= NV_50) {
899 		struct nouveau_mem *node = old_mem->mm_node;
900 
901 		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
902 		if (ret)
903 			goto out;
904 
905 		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
906 		if (ret)
907 			goto out;
908 	}
909 
910 	ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
911 	if (ret == 0) {
912 		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
913 						    no_wait_reserve,
914 						    no_wait_gpu, new_mem);
915 	}
916 
917 out:
918 	mutex_unlock(&chan->mutex);
919 	return ret;
920 }
921 
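/* Pick an engine to accelerate buffer moves.  The table below is walked
 * in order of preference; the first object class the channel accepts,
 * and whose init routine succeeds, becomes dev_priv->ttm.move.  If none
 * works, moves fall back to CPU copies.  Note the walk stops at the
 * empty sentinel entry, so the 0x88b4 CRYPT entry placed after it is
 * effectively disabled (kept, presumably, for reference).
 */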
922 void
923 nouveau_bo_move_init(struct nouveau_channel *chan)
924 {
925 	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
926 	static const struct {
927 		const char *name;
928 		int engine;
929 		u32 oclass;
930 		int (*exec)(struct nouveau_channel *,
931 			    struct ttm_buffer_object *,
932 			    struct ttm_mem_reg *, struct ttm_mem_reg *);
933 		int (*init)(struct nouveau_channel *, u32 handle);
934 	} _methods[] = {
935 		{  "COPY", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
936 		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
937 		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
938 		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
939 		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
940 		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
941 		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
942 		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
943 		{},
944 		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
945 	}, *mthd = _methods;
946 	const char *name = "CPU";
947 	int ret;
948 
949 	do {
950 		u32 handle = (mthd->engine << 16) | mthd->oclass;
951 		ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass);
952 		if (ret == 0) {
953 			ret = mthd->init(chan, handle);
954 			if (ret == 0) {
955 				dev_priv->ttm.move = mthd->exec;
956 				name = mthd->name;
957 				break;
958 			}
959 		}
960 	} while ((++mthd)->exec);
961 
962 	NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
963 }
964 
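/* The copy engines only see TTM_PL_VRAM and TTM_PL_TT, so moves to or
 * from TTM_PL_SYSTEM take two hops via a temporary TT placement: "flipd"
 * copies with the GPU first and then lets TTM flip the pages out to
 * system memory, while "flips" binds the pages into the GART first and
 * then copies with the GPU.
 */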
965 static int
966 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
967 		      bool no_wait_reserve, bool no_wait_gpu,
968 		      struct ttm_mem_reg *new_mem)
969 {
970 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
971 	struct ttm_placement placement;
972 	struct ttm_mem_reg tmp_mem;
973 	int ret;
974 
975 	placement.fpfn = placement.lpfn = 0;
976 	placement.num_placement = placement.num_busy_placement = 1;
977 	placement.placement = placement.busy_placement = &placement_memtype;
978 
979 	tmp_mem = *new_mem;
980 	tmp_mem.mm_node = NULL;
981 	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
982 	if (ret)
983 		return ret;
984 
985 	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
986 	if (ret)
987 		goto out;
988 
989 	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
990 	if (ret)
991 		goto out;
992 
993 	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
994 out:
995 	ttm_bo_mem_put(bo, &tmp_mem);
996 	return ret;
997 }
998 
999 static int
1000 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1001 		      bool no_wait_reserve, bool no_wait_gpu,
1002 		      struct ttm_mem_reg *new_mem)
1003 {
1004 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
1005 	struct ttm_placement placement;
1006 	struct ttm_mem_reg tmp_mem;
1007 	int ret;
1008 
1009 	placement.fpfn = placement.lpfn = 0;
1010 	placement.num_placement = placement.num_busy_placement = 1;
1011 	placement.placement = placement.busy_placement = &placement_memtype;
1012 
1013 	tmp_mem = *new_mem;
1014 	tmp_mem.mm_node = NULL;
1015 	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
1016 	if (ret)
1017 		return ret;
1018 
1019 	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
1020 	if (ret)
1021 		goto out;
1022 
1023 	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
1024 	if (ret)
1025 		goto out;
1026 
1027 out:
1028 	ttm_bo_mem_put(bo, &tmp_mem);
1029 	return ret;
1030 }
1031 
1032 static void
1033 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
1034 {
1035 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1036 	struct nouveau_vma *vma;
1037 
1038 	/* ttm can now (stupidly) pass the driver bos it didn't create... */
1039 	if (bo->destroy != nouveau_bo_del_ttm)
1040 		return;
1041 
1042 	list_for_each_entry(vma, &nvbo->vma_list, head) {
1043 		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
1044 			nouveau_vm_map(vma, new_mem->mm_node);
1045 		} else
1046 		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
1047 		    nvbo->page_shift == vma->vm->spg_shift) {
1048 			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
1049 				nouveau_vm_map_sg_table(vma, 0, new_mem->
1050 						  num_pages << PAGE_SHIFT,
1051 						  new_mem->mm_node);
1052 			else
1053 				nouveau_vm_map_sg(vma, 0, new_mem->
1054 						  num_pages << PAGE_SHIFT,
1055 						  new_mem->mm_node);
1056 		} else {
1057 			nouveau_vm_unmap(vma);
1058 		}
1059 	}
1060 }
1061 
1062 static int
1063 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
1064 		   struct nouveau_tile_reg **new_tile)
1065 {
1066 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
1067 	struct drm_device *dev = dev_priv->dev;
1068 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1069 	u64 offset = new_mem->start << PAGE_SHIFT;
1070 
1071 	*new_tile = NULL;
1072 	if (new_mem->mem_type != TTM_PL_VRAM)
1073 		return 0;
1074 
1075 	if (dev_priv->card_type >= NV_10) {
1076 		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
1077 						nvbo->tile_mode,
1078 						nvbo->tile_flags);
1079 	}
1080 
1081 	return 0;
1082 }
1083 
1084 static void
1085 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1086 		      struct nouveau_tile_reg *new_tile,
1087 		      struct nouveau_tile_reg **old_tile)
1088 {
1089 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
1090 	struct drm_device *dev = dev_priv->dev;
1091 
1092 	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
1093 	*old_tile = new_tile;
1094 }
1095 
1096 static int
1097 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1098 		bool no_wait_reserve, bool no_wait_gpu,
1099 		struct ttm_mem_reg *new_mem)
1100 {
1101 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
1102 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1103 	struct ttm_mem_reg *old_mem = &bo->mem;
1104 	struct nouveau_tile_reg *new_tile = NULL;
1105 	int ret = 0;
1106 
1107 	if (dev_priv->card_type < NV_50) {
1108 		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
1109 		if (ret)
1110 			return ret;
1111 	}
1112 
1113 	/* Fake bo copy. */
1114 	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
1115 		BUG_ON(bo->mem.mm_node != NULL);
1116 		bo->mem = *new_mem;
1117 		new_mem->mm_node = NULL;
1118 		goto out;
1119 	}
1120 
1121 	/* CPU copy if we have no accelerated method available */
1122 	if (!dev_priv->ttm.move) {
1123 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
1124 		goto out;
1125 	}
1126 
1127 	/* Hardware assisted copy. */
1128 	if (new_mem->mem_type == TTM_PL_SYSTEM)
1129 		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
1130 	else if (old_mem->mem_type == TTM_PL_SYSTEM)
1131 		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
1132 	else
1133 		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
1134 
1135 	if (!ret)
1136 		goto out;
1137 
1138 	/* Fallback to software copy. */
1139 	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
1140 
1141 out:
1142 	if (dev_priv->card_type < NV_50) {
1143 		if (ret)
1144 			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1145 		else
1146 			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
1147 	}
1148 
1149 	return ret;
1150 }
1151 
1152 static int
1153 nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1154 {
1155 	return 0;
1156 }
1157 
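/* Tell TTM how a bo's storage is reached through the PCI BARs.  AGP
 * pages are exposed through the aperture, VRAM on cards without a BAR1
 * VM is mapped at its raw offset into BAR1, and newer cards first map
 * the bo into the BAR1 virtual address space and return that offset
 * instead.
 */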
1158 static int
1159 nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1160 {
1161 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1162 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
1163 	struct drm_device *dev = dev_priv->dev;
1164 	int ret;
1165 
1166 	mem->bus.addr = NULL;
1167 	mem->bus.offset = 0;
1168 	mem->bus.size = mem->num_pages << PAGE_SHIFT;
1169 	mem->bus.base = 0;
1170 	mem->bus.is_iomem = false;
1171 	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1172 		return -EINVAL;
1173 	switch (mem->mem_type) {
1174 	case TTM_PL_SYSTEM:
1175 		/* System memory */
1176 		return 0;
1177 	case TTM_PL_TT:
1178 #if __OS_HAS_AGP
1179 		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
1180 			mem->bus.offset = mem->start << PAGE_SHIFT;
1181 			mem->bus.base = dev_priv->gart_info.aper_base;
1182 			mem->bus.is_iomem = true;
1183 		}
1184 #endif
1185 		break;
1186 	case TTM_PL_VRAM:
1187 	{
1188 		struct nouveau_mem *node = mem->mm_node;
1189 		u8 page_shift;
1190 
1191 		if (!dev_priv->bar1_vm) {
1192 			mem->bus.offset = mem->start << PAGE_SHIFT;
1193 			mem->bus.base = pci_resource_start(dev->pdev, 1);
1194 			mem->bus.is_iomem = true;
1195 			break;
1196 		}
1197 
1198 		if (dev_priv->card_type >= NV_C0)
1199 			page_shift = node->page_shift;
1200 		else
1201 			page_shift = 12;
1202 
1203 		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
1204 				     page_shift, NV_MEM_ACCESS_RW,
1205 				     &node->bar_vma);
1206 		if (ret)
1207 			return ret;
1208 
		nouveau_vm_map(&node->bar_vma, node);
1214 
1215 		mem->bus.offset = node->bar_vma.offset;
1216 		if (dev_priv->card_type == NV_50) /*XXX*/
1217 			mem->bus.offset -= 0x0020000000ULL;
1218 		mem->bus.base = pci_resource_start(dev->pdev, 1);
1219 		mem->bus.is_iomem = true;
1220 	}
1221 		break;
1222 	default:
1223 		return -EINVAL;
1224 	}
1225 	return 0;
1226 }
1227 
1228 static void
1229 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1230 {
1231 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
1232 	struct nouveau_mem *node = mem->mm_node;
1233 
1234 	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
1235 		return;
1236 
1237 	if (!node->bar_vma.node)
1238 		return;
1239 
1240 	nouveau_vm_unmap(&node->bar_vma);
1241 	nouveau_vm_put(&node->bar_vma);
1242 }
1243 
1244 static int
1245 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1246 {
1247 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
1248 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1249 
1250 	/* as long as the bo isn't in vram, and isn't tiled, we've got
1251 	 * nothing to do here.
1252 	 */
1253 	if (bo->mem.mem_type != TTM_PL_VRAM) {
1254 		if (dev_priv->card_type < NV_50 ||
1255 		    !nouveau_bo_tile_layout(nvbo))
1256 			return 0;
1257 	}
1258 
1259 	/* make sure bo is in mappable vram */
1260 	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
1261 		return 0;
1262 
1264 	nvbo->placement.fpfn = 0;
1265 	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
1266 	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1267 	return nouveau_bo_validate(nvbo, false, true, false);
1268 }
1269 
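/* Populate the backing pages of a ttm_tt.  DMA-BUF imports (the "slave"
 * case) already own their pages, so only the page/address arrays are
 * filled in; AGP and swiotlb configurations use their dedicated pools;
 * otherwise pages come from the generic pool and are DMA-mapped one at a
 * time, with the mappings unwound again if any of them fails.
 */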
1270 static int
1271 nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1272 {
1273 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
1274 	struct drm_nouveau_private *dev_priv;
1275 	struct drm_device *dev;
1276 	unsigned i;
1277 	int r;
1278 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1279 
1280 	if (ttm->state != tt_unpopulated)
1281 		return 0;
1282 
1283 	if (slave && ttm->sg) {
1284 		/* make userspace faulting work */
1285 		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1286 						 ttm_dma->dma_address, ttm->num_pages);
1287 		ttm->state = tt_unbound;
1288 		return 0;
1289 	}
1290 
1291 	dev_priv = nouveau_bdev(ttm->bdev);
1292 	dev = dev_priv->dev;
1293 
1294 #if __OS_HAS_AGP
1295 	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
1296 		return ttm_agp_tt_populate(ttm);
1297 	}
1298 #endif
1299 
1300 #ifdef CONFIG_SWIOTLB
1301 	if (swiotlb_nr_tbl()) {
1302 		return ttm_dma_populate((void *)ttm, dev->dev);
1303 	}
1304 #endif
1305 
1306 	r = ttm_pool_populate(ttm);
1307 	if (r) {
1308 		return r;
1309 	}
1310 
1311 	for (i = 0; i < ttm->num_pages; i++) {
1312 		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
1313 						   0, PAGE_SIZE,
1314 						   PCI_DMA_BIDIRECTIONAL);
1315 		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
			while (i--) {
1317 				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
1318 					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
1319 				ttm_dma->dma_address[i] = 0;
1320 			}
1321 			ttm_pool_unpopulate(ttm);
1322 			return -EFAULT;
1323 		}
1324 	}
1325 	return 0;
1326 }
1327 
1328 static void
1329 nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1330 {
1331 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
1332 	struct drm_nouveau_private *dev_priv;
1333 	struct drm_device *dev;
1334 	unsigned i;
1335 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1336 
1337 	if (slave)
1338 		return;
1339 
1340 	dev_priv = nouveau_bdev(ttm->bdev);
1341 	dev = dev_priv->dev;
1342 
1343 #if __OS_HAS_AGP
1344 	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
1345 		ttm_agp_tt_unpopulate(ttm);
1346 		return;
1347 	}
1348 #endif
1349 
1350 #ifdef CONFIG_SWIOTLB
1351 	if (swiotlb_nr_tbl()) {
1352 		ttm_dma_unpopulate((void *)ttm, dev->dev);
1353 		return;
1354 	}
1355 #endif
1356 
1357 	for (i = 0; i < ttm->num_pages; i++) {
1358 		if (ttm_dma->dma_address[i]) {
1359 			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
1360 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
1361 		}
1362 	}
1363 
1364 	ttm_pool_unpopulate(ttm);
1365 }
1366 
1367 void
1368 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
1369 {
1370 	struct nouveau_fence *old_fence = NULL;
1371 
1372 	if (likely(fence))
1373 		nouveau_fence_ref(fence);
1374 
1375 	spin_lock(&nvbo->bo.bdev->fence_lock);
1376 	old_fence = nvbo->bo.sync_obj;
1377 	nvbo->bo.sync_obj = fence;
1378 	spin_unlock(&nvbo->bo.bdev->fence_lock);
1379 
1380 	nouveau_fence_unref(&old_fence);
1381 }
1382 
1383 static void
1384 nouveau_bo_fence_unref(void **sync_obj)
1385 {
1386 	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
1387 }
1388 
1389 static void *
1390 nouveau_bo_fence_ref(void *sync_obj)
1391 {
1392 	return nouveau_fence_ref(sync_obj);
1393 }
1394 
1395 static bool
1396 nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
1397 {
1398 	return nouveau_fence_done(sync_obj);
1399 }
1400 
1401 static int
1402 nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
1403 {
1404 	return nouveau_fence_wait(sync_obj, lazy, intr);
1405 }
1406 
1407 static int
1408 nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
1409 {
1410 	return 0;
1411 }
1412 
1413 struct ttm_bo_driver nouveau_bo_driver = {
1414 	.ttm_tt_create = &nouveau_ttm_tt_create,
1415 	.ttm_tt_populate = &nouveau_ttm_tt_populate,
1416 	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
1417 	.invalidate_caches = nouveau_bo_invalidate_caches,
1418 	.init_mem_type = nouveau_bo_init_mem_type,
1419 	.evict_flags = nouveau_bo_evict_flags,
1420 	.move_notify = nouveau_bo_move_ntfy,
1421 	.move = nouveau_bo_move,
1422 	.verify_access = nouveau_bo_verify_access,
1423 	.sync_obj_signaled = nouveau_bo_fence_signalled,
1424 	.sync_obj_wait = nouveau_bo_fence_wait,
1425 	.sync_obj_flush = nouveau_bo_fence_flush,
1426 	.sync_obj_unref = nouveau_bo_fence_unref,
1427 	.sync_obj_ref = nouveau_bo_fence_ref,
1428 	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
1429 	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
1430 	.io_mem_free = &nouveau_ttm_io_mem_free,
1431 };
1432 
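/* Per-VM mappings of a bo are tracked on nvbo->vma_list.  Rough sketch of
 * the expected usage (hypothetical caller, allocation and error handling
 * elided):
 *
 *	struct nouveau_vma *vma = nouveau_bo_vma_find(nvbo, vm);
 *	if (vma) {
 *		vma->refcount++;
 *	} else {
 *		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 *		ret = nouveau_bo_vma_add(nvbo, vm, vma);
 *	}
 */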
1433 struct nouveau_vma *
1434 nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
1435 {
1436 	struct nouveau_vma *vma;
1437 	list_for_each_entry(vma, &nvbo->vma_list, head) {
1438 		if (vma->vm == vm)
1439 			return vma;
1440 	}
1441 
1442 	return NULL;
1443 }
1444 
1445 int
1446 nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
1447 		   struct nouveau_vma *vma)
1448 {
1449 	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
1450 	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
1451 	int ret;
1452 
1453 	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
1454 			     NV_MEM_ACCESS_RW, vma);
1455 	if (ret)
1456 		return ret;
1457 
1458 	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
1459 		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
1460 	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
1461 		if (node->sg)
1462 			nouveau_vm_map_sg_table(vma, 0, size, node);
1463 		else
1464 			nouveau_vm_map_sg(vma, 0, size, node);
1465 	}
1466 
1467 	list_add_tail(&vma->head, &nvbo->vma_list);
1468 	vma->refcount = 1;
1469 	return 0;
1470 }
1471 
1472 void
1473 nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
1474 {
1475 	if (vma->node) {
1476 		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
1477 			spin_lock(&nvbo->bo.bdev->fence_lock);
1478 			ttm_bo_wait(&nvbo->bo, false, false, false);
1479 			spin_unlock(&nvbo->bo.bdev->fence_lock);
1480 			nouveau_vm_unmap(vma);
1481 		}
1482 
1483 		nouveau_vm_put(vma);
1484 		list_del(&vma->head);
1485 	}
1486 }
1487