/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

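/*
 * TTM destroy callback, called when the last reference to a buffer
 * object is dropped.  By this point the BO should already have been
 * detached from its GEM object; the check below only warns, it does
 * not clean the reference up.
 */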
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	if (nvbo->vma.node) {
		nouveau_vm_unmap(&nvbo->vma);
		nouveau_vm_put(&nvbo->vma);
	}
	kfree(nvbo);
}

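/*
 * Round the requested size and alignment up to what the hardware
 * wants.  Pre-NV50, tiled buffers need a chipset-dependent alignment
 * and a size that is a multiple of the tile dimensions.  On NV50+ the
 * rounding instead follows the VM page size selected for the buffer:
 * large pages for VRAM buffers over 256KiB, small pages otherwise,
 * and plain 4KiB pages when no channel VM exists.
 */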
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size, int *page_shift)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		if (likely(dev_priv->chan_vm)) {
			if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
				*page_shift = dev_priv->chan_vm->lpg_shift;
			else
				*page_shift = dev_priv->chan_vm->spg_shift;
		} else {
			*page_shift = 12;
		}

		*size = roundup(*size, (1 << *page_shift));
		*align = max((1 << *page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

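/*
 * Allocate and initialise a new buffer object.  "chan" is only held
 * across the initial ttm_bo_init() so that any migration triggered by
 * the first validate can be done with that channel; it is not kept
 * after creation.
 */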
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret = 0, page_shift = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
	align >>= PAGE_SHIFT;

	if (dev_priv->chan_vm) {
		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
				     NV_MEM_ACCESS_RW, &nvbo->vma);
		if (ret) {
			kfree(nvbo);
			return ret;
		}
	}

	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	if (ret) {
		/* TTM calls nouveau_bo_del_ttm() itself on failure. */
		return ret;
	}
	nvbo->channel = NULL;

	if (nvbo->vma.node)
		nvbo->bo.offset = nvbo->vma.offset;
	*pnvbo = nvbo;
	return 0;
}

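/*
 * Expand a TTM_PL_FLAG_* domain mask into the placement array TTM
 * expects, always in VRAM, TT, SYSTEM order.
 */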
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

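/*
 * NV1x only: constrain tiled VRAM buffers to one half of VRAM, so
 * colour and depth buffers land on independent memory controller
 * units (see the comment in the body below).
 */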
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 2) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

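/*
 * Rebuild a BO's TTM placement: "type" is the preferred domain mask,
 * "busy" adds domains that are acceptable when the preferred ones are
 * contended.  Pinned buffers additionally get TTM_PL_FLAG_NO_EVICT.
 */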
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

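/*
 * Pin a buffer into the given memory type.  Pinning nests, but a
 * second pin must be compatible with wherever the buffer is already
 * resident; the aperture accounting is only updated on the first pin.
 */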
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(dev_priv->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

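/* Drop a pin reference; the buffer becomes evictable again after the last unpin. */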
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

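/*
 * Kmap the entire buffer into kernel space so that the
 * nouveau_bo_rd/wr accessors below can be used on it.
 */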
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

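/*
 * Validate the BO against its current placement, then refresh the GPU
 * offset from the per-BO virtual mapping when one exists.
 */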
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	if (nvbo->vma.node)
		nvbo->bo.offset = nvbo->vma.offset;
	return 0;
}

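/*
 * 16- and 32-bit accessors for a kmapped BO.  ttm_kmap_obj_virtual()
 * reports whether the mapping is I/O memory, in which case the MMIO
 * accessors must be used instead of plain loads and stores.
 */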
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

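/*
 * TTM backend constructor: AGP systems use the generic TTM AGP
 * backend, everything else goes through nouveau's own sgdma code.
 */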
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_PDMA:
	case NOUVEAU_GART_HW:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

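/*
 * Describe each memory type to TTM: which manager allocates from it,
 * whether it is CPU-mappable, and which caching modes it supports.
 * NV50+ boards use nouveau's own range managers for VRAM and GART.
 */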
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (dev_priv->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
			man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_PDMA:
		case NOUVEAU_GART_HW:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			man->gpu_offset = dev_priv->gart_info.aper_base;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

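/*
 * Eviction policy: push VRAM buffers out through GART (with system
 * memory as the busy fallback), everything else straight to system
 * memory.
 */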
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

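/*
 * Emit a fence on the copy channel and hand it to TTM, so the old
 * backing store is only released once the copy has actually
 * completed.
 */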
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

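/*
 * Fermi (NVC0) copy path.  Source and destination are both virtual
 * addresses in the channel VM, and the buffer is copied as page-sized
 * lines, at most 2047 lines per method invocation.
 */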
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *old_node = old_mem->mm_node;
	struct nouveau_mem *new_node = new_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u32 page_count = new_mem->num_pages;
	u64 src_offset, dst_offset;
	int ret;

	src_offset = old_node->tmp_vma.offset;
	if (new_node->tmp_vma.node)
		dst_offset = new_node->tmp_vma.offset;
	else
		dst_offset = nvbo->vma.offset;

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

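/*
 * NV50/Tesla copy path, moving up to 4MiB per loop iteration.  Tiled
 * VRAM surfaces are described to M2MF with a seven-parameter surface
 * setup; linear surfaces just set the single linear flag.
 */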
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *old_node = old_mem->mm_node;
	struct nouveau_mem *new_node = new_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset, dst_offset;
	int ret;

	src_offset = old_node->tmp_vma.offset;
	if (new_node->tmp_vma.node)
		dst_offset = new_node->tmp_vma.offset;
	else
		dst_offset = nvbo->vma.offset;

	while (length) {
		u32 amount, stride, height;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

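/*
 * Pre-NV50 copy path: offsets are 32-bit and relative to the VRAM and
 * GART context DMA objects, which are bound once before the copy
 * loop.
 */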
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

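/*
 * Top-level accelerated move.  Uses the BO's own channel when one is
 * attached (i.e. during the initial validate in nouveau_bo_new()),
 * otherwise the kernel channel under its mutex.  On NV50+ the old
 * backing store must first be mapped into the copy channel's VM
 * through a temporary vma.
 */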
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	/* Create a temporary vma for the old memory; it gets cleaned
	 * up after TTM destroys the ttm_mem_reg.
	 */
	if (dev_priv->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;
		if (!node->tmp_vma.node) {
			u32 page_shift = nvbo->vma.node->type;
			if (old_mem->mem_type == TTM_PL_TT)
				page_shift = nvbo->vma.vm->spg_shift;

			ret = nouveau_vm_get(chan->vm,
					     old_mem->num_pages << PAGE_SHIFT,
					     page_shift, NV_MEM_ACCESS_RO,
					     &node->tmp_vma);
			if (ret)
				goto out;
		}

		if (old_mem->mem_type == TTM_PL_VRAM)
			nouveau_vm_map(&node->tmp_vma, node);
		else {
			nouveau_vm_map_sg(&node->tmp_vma, 0,
					  old_mem->num_pages << PAGE_SHIFT,
					  node, node->pages);
		}
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
	if (dev_priv->card_type < NV_C0)
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

out:
	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}

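/*
 * VRAM -> system move in two hops: hardware-copy into a temporary
 * GART placement the copy engine can reach, then let ttm_bo_move_ttm()
 * finish the move into system memory.
 */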
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	if (dev_priv->card_type >= NV_50) {
		struct nouveau_bo *nvbo = nouveau_bo(bo);
		struct nouveau_mem *node = tmp_mem.mm_node;
		struct nouveau_vma *vma = &nvbo->vma;
		if (vma->node->type != vma->vm->spg_shift)
			vma = &node->tmp_vma;
		nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
				  node, node->pages);
	}

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);

	if (dev_priv->card_type >= NV_50) {
		struct nouveau_bo *nvbo = nouveau_bo(bo);
		nouveau_vm_unmap(&nvbo->vma);
	}

	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

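/*
 * System -> VRAM move, the inverse of nouveau_bo_move_flipd(): bind
 * the pages into a temporary GART placement with ttm_bo_move_ttm()
 * first, then hardware-copy into the final VRAM placement.
 */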
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

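/*
 * TTM move notification: keep the BO's VM mapping in sync with its
 * new backing store.  VRAM pages are mapped directly, system pages as
 * a scatter list, and anything else causes the mapping to be torn
 * down.
 */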
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_mem *node = new_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma = &nvbo->vma;
	struct nouveau_vm *vm = vma->vm;

	if (dev_priv->card_type < NV_50)
		return;

	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		nouveau_vm_map(vma, node);
		break;
	case TTM_PL_TT:
		if (vma->node->type != vm->spg_shift) {
			nouveau_vm_unmap(vma);
			vma = &node->tmp_vma;
		}
		nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
				  node, node->pages);
		break;
	default:
		nouveau_vm_unmap(&nvbo->vma);
		break;
	}
}

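/*
 * Pre-NV50 tiling bookkeeping: set up a tile region covering the BO's
 * new VRAM location before a move; the cleanup counterpart below
 * releases the old region once the move's fence has signalled.
 */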
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

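/*
 * Main TTM move callback: use a hardware copy whenever possible, and
 * fall back to memcpy when no channel is up yet or the accelerated
 * path fails.
 */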
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (dev_priv->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

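/*
 * Tell TTM where a memory region lives in the CPU's address space.
 * On boards with a BAR1 VM, VRAM first has to be mapped into a window
 * behind BAR1 through a temporary vma.
 */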
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM: {
		struct nouveau_mem *node = mem->mm_node;
		u8 page_shift;

		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;
			break;
		}

		if (dev_priv->card_type == NV_C0)
			page_shift = node->page_shift;
		else
			page_shift = 12;

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
				     page_shift, NV_MEM_ACCESS_RW,
				     &node->bar_vma);
		if (ret)
			return ret;

		/* nouveau_vm_map() has no return value to check. */
		nouveau_vm_map(&node->bar_vma, node);

		mem->bus.offset = node->bar_vma.offset;
		if (dev_priv->card_type == NV_50) /*XXX*/
			mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

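/* Tear down the temporary BAR1 mapping set up by io_mem_reserve(). */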
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (!node->bar_vma.node)
		return;

	nouveau_vm_unmap(&node->bar_vma);
	nouveau_vm_put(&node->bar_vma);
}

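/*
 * CPU fault handling: anything the CPU touches through BAR1 must lie
 * within the mappable part of VRAM, so migrate the buffer there when
 * it does not.
 */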
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

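/*
 * Attach a new fence as the BO's sync object, dropping the reference
 * to the previous one under the bdev fence lock.
 */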
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};