1 /*
2  * Copyright 2009 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 /*
27  * Authors:
28  *    Jerome Glisse <glisse@freedesktop.org>
29  *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30  *    Dave Airlie
31  */
32 #include <ttm/ttm_bo_api.h>
33 #include <ttm/ttm_bo_driver.h>
34 #include <ttm/ttm_placement.h>
35 #include <ttm/ttm_module.h>
36 #include <ttm/ttm_page_alloc.h>
37 #include <drm/drmP.h>
38 #include <drm/radeon_drm.h>
39 #include <linux/seq_file.h>
40 #include <linux/slab.h>
41 #include "radeon_reg.h"
42 #include "radeon.h"
43 
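/*
 * TTM buffer object mmap offsets start at 4GB worth of pages so they
 * can never collide with the legacy map offsets served by drm_mmap().
 */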
44 #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
45 
46 static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
47 
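/*
 * radeon_get_rdev - recover the radeon_device behind a ttm_bo_device.
 *
 * The ttm_bo_device is embedded in struct radeon_mman, which in turn
 * is embedded in struct radeon_device, so two container_of() steps
 * walk back to the owning device.
 */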
48 static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
49 {
50 	struct radeon_mman *mman;
51 	struct radeon_device *rdev;
52 
53 	mman = container_of(bdev, struct radeon_mman, bdev);
54 	rdev = container_of(mman, struct radeon_device, mman);
55 	return rdev;
56 }
57 
58 
59 /*
60  * Global memory.
61  */
62 static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
63 {
64 	return ttm_mem_global_init(ref->object);
65 }
66 
67 static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
68 {
69 	ttm_mem_global_release(ref->object);
70 }
71 
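/*
 * radeon_ttm_global_init - take references on the global TTM state.
 *
 * The memory accounting object and the BO global state are shared by
 * all devices; drm_global_item_ref() refcounts them, so the first
 * caller initializes them and the matching unrefs in
 * radeon_ttm_global_fini() tear them down again.
 */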
72 static int radeon_ttm_global_init(struct radeon_device *rdev)
73 {
74 	struct drm_global_reference *global_ref;
75 	int r;
76 
77 	rdev->mman.mem_global_referenced = false;
78 	global_ref = &rdev->mman.mem_global_ref;
79 	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
80 	global_ref->size = sizeof(struct ttm_mem_global);
81 	global_ref->init = &radeon_ttm_mem_global_init;
82 	global_ref->release = &radeon_ttm_mem_global_release;
83 	r = drm_global_item_ref(global_ref);
84 	if (r != 0) {
85 		DRM_ERROR("Failed setting up TTM memory accounting "
86 			  "subsystem.\n");
87 		return r;
88 	}
89 
90 	rdev->mman.bo_global_ref.mem_glob =
91 		rdev->mman.mem_global_ref.object;
92 	global_ref = &rdev->mman.bo_global_ref.ref;
93 	global_ref->global_type = DRM_GLOBAL_TTM_BO;
94 	global_ref->size = sizeof(struct ttm_bo_global);
95 	global_ref->init = &ttm_bo_global_init;
96 	global_ref->release = &ttm_bo_global_release;
97 	r = drm_global_item_ref(global_ref);
98 	if (r != 0) {
99 		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
100 		drm_global_item_unref(&rdev->mman.mem_global_ref);
101 		return r;
102 	}
103 
104 	rdev->mman.mem_global_referenced = true;
105 	return 0;
106 }
107 
108 static void radeon_ttm_global_fini(struct radeon_device *rdev)
109 {
110 	if (rdev->mman.mem_global_referenced) {
111 		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
112 		drm_global_item_unref(&rdev->mman.mem_global_ref);
113 		rdev->mman.mem_global_referenced = false;
114 	}
115 }
116 
117 struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
118 
static struct ttm_backend *
radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
121 {
122 	struct radeon_device *rdev;
123 
124 	rdev = radeon_get_rdev(bdev);
125 #if __OS_HAS_AGP
126 	if (rdev->flags & RADEON_IS_AGP) {
127 		return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
128 	} else
129 #endif
130 	{
131 		return radeon_ttm_backend_create(rdev);
132 	}
133 }
134 
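/* TTM cache invalidation hook; radeon has nothing to invalidate here. */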
135 static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
136 {
137 	return 0;
138 }
139 
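/*
 * radeon_init_mem_type - describe each memory domain to TTM: SYSTEM is
 * plain cached pages, TT is the GART aperture (write-combined and
 * routed through the AGP bridge when AGP is active), and VRAM is the
 * fixed on-board aperture.
 */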
140 static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
141 				struct ttm_mem_type_manager *man)
142 {
143 	struct radeon_device *rdev;
144 
145 	rdev = radeon_get_rdev(bdev);
146 
147 	switch (type) {
148 	case TTM_PL_SYSTEM:
149 		/* System memory */
150 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
151 		man->available_caching = TTM_PL_MASK_CACHING;
152 		man->default_caching = TTM_PL_FLAG_CACHED;
153 		break;
154 	case TTM_PL_TT:
155 		man->gpu_offset = rdev->mc.gtt_start;
156 		man->available_caching = TTM_PL_MASK_CACHING;
157 		man->default_caching = TTM_PL_FLAG_CACHED;
158 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
159 #if __OS_HAS_AGP
160 		if (rdev->flags & RADEON_IS_AGP) {
161 			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
162 				DRM_ERROR("AGP is not enabled for memory type %u\n",
163 					  (unsigned)type);
164 				return -EINVAL;
165 			}
166 			if (!rdev->ddev->agp->cant_use_aperture)
167 				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
168 			man->available_caching = TTM_PL_FLAG_UNCACHED |
169 						 TTM_PL_FLAG_WC;
170 			man->default_caching = TTM_PL_FLAG_WC;
171 		}
172 #endif
173 		break;
174 	case TTM_PL_VRAM:
175 		/* "On-card" video ram */
176 		man->gpu_offset = rdev->mc.vram_start;
177 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
178 			     TTM_MEMTYPE_FLAG_MAPPABLE;
179 		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
180 		man->default_caching = TTM_PL_FLAG_WC;
181 		break;
182 	default:
183 		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
184 		return -EINVAL;
185 	}
186 	return 0;
187 }
188 
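/*
 * radeon_evict_flags - choose where an evicted BO should go.  BOs not
 * owned by radeon get a generic system-memory placement.  VRAM is
 * evicted to GTT while the CP is running (so the blit path can still
 * be used) and straight to system memory otherwise; GTT contents
 * always fall back to system memory.
 */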
189 static void radeon_evict_flags(struct ttm_buffer_object *bo,
190 				struct ttm_placement *placement)
191 {
192 	struct radeon_bo *rbo;
193 	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
194 
195 	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
196 		placement->fpfn = 0;
197 		placement->lpfn = 0;
198 		placement->placement = &placements;
199 		placement->busy_placement = &placements;
200 		placement->num_placement = 1;
201 		placement->num_busy_placement = 1;
202 		return;
203 	}
204 	rbo = container_of(bo, struct radeon_bo, tbo);
205 	switch (bo->mem.mem_type) {
206 	case TTM_PL_VRAM:
		if (!rbo->rdev->cp.ready)
208 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
209 		else
210 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
211 		break;
212 	case TTM_PL_TT:
213 	default:
214 		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
215 	}
216 	*placement = rbo->placement;
217 }
218 
219 static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
220 {
221 	return 0;
222 }
223 
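/*
 * radeon_move_null - complete a move that needs no data transfer by
 * simply adopting the new memory region.
 */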
224 static void radeon_move_null(struct ttm_buffer_object *bo,
225 			     struct ttm_mem_reg *new_mem)
226 {
227 	struct ttm_mem_reg *old_mem = &bo->mem;
228 
229 	BUG_ON(old_mem->mm_node != NULL);
230 	*old_mem = *new_mem;
231 	new_mem->mm_node = NULL;
232 }
233 
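/*
 * radeon_move_blit - copy a BO with the GPU.  Both placements are
 * translated to MC bus addresses (only VRAM and GTT qualify), a CP
 * copy is kicked off and fenced, and TTM then waits on or pipelines
 * the fence before releasing the old memory.
 */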
234 static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict, bool no_wait_reserve, bool no_wait_gpu,
236 			struct ttm_mem_reg *new_mem,
237 			struct ttm_mem_reg *old_mem)
238 {
239 	struct radeon_device *rdev;
240 	uint64_t old_start, new_start;
241 	struct radeon_fence *fence;
242 	int r;
243 
244 	rdev = radeon_get_rdev(bo->bdev);
245 	r = radeon_fence_create(rdev, &fence);
246 	if (unlikely(r)) {
247 		return r;
248 	}
249 	old_start = old_mem->mm_node->start << PAGE_SHIFT;
250 	new_start = new_mem->mm_node->start << PAGE_SHIFT;
251 
252 	switch (old_mem->mem_type) {
253 	case TTM_PL_VRAM:
254 		old_start += rdev->mc.vram_start;
255 		break;
256 	case TTM_PL_TT:
257 		old_start += rdev->mc.gtt_start;
258 		break;
259 	default:
260 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
261 		return -EINVAL;
262 	}
263 	switch (new_mem->mem_type) {
264 	case TTM_PL_VRAM:
265 		new_start += rdev->mc.vram_start;
266 		break;
267 	case TTM_PL_TT:
268 		new_start += rdev->mc.gtt_start;
269 		break;
270 	default:
271 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
272 		return -EINVAL;
273 	}
	if (!rdev->cp.ready) {
		DRM_ERROR("Trying to move memory with CP turned off.\n");
		return -EINVAL;
	}
	r = radeon_fence_create(rdev, &fence);
	if (unlikely(r)) {
		return r;
	}
278 	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
279 	/* FIXME: handle copy error */
280 	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
281 				      evict, no_wait_reserve, no_wait_gpu, new_mem);
282 	radeon_fence_unref(&fence);
283 	return r;
284 }
285 
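/*
 * radeon_move_vram_ram - evict VRAM to system memory in two hops:
 * blit VRAM -> GTT first (radeon_move_blit() only understands VRAM
 * and GTT placements), then let TTM move GTT -> system.
 */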
286 static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
287 				bool evict, bool interruptible,
288 				bool no_wait_reserve, bool no_wait_gpu,
289 				struct ttm_mem_reg *new_mem)
290 {
291 	struct radeon_device *rdev;
292 	struct ttm_mem_reg *old_mem = &bo->mem;
293 	struct ttm_mem_reg tmp_mem;
294 	u32 placements;
295 	struct ttm_placement placement;
296 	int r;
297 
298 	rdev = radeon_get_rdev(bo->bdev);
299 	tmp_mem = *new_mem;
300 	tmp_mem.mm_node = NULL;
301 	placement.fpfn = 0;
302 	placement.lpfn = 0;
303 	placement.num_placement = 1;
304 	placement.placement = &placements;
305 	placement.num_busy_placement = 1;
306 	placement.busy_placement = &placements;
307 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
308 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
309 			     interruptible, no_wait_reserve, no_wait_gpu);
310 	if (unlikely(r)) {
311 		return r;
312 	}
313 
314 	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
315 	if (unlikely(r)) {
316 		goto out_cleanup;
317 	}
318 
319 	r = ttm_tt_bind(bo->ttm, &tmp_mem);
320 	if (unlikely(r)) {
321 		goto out_cleanup;
322 	}
323 	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
324 	if (unlikely(r)) {
325 		goto out_cleanup;
326 	}
327 	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
328 out_cleanup:
329 	if (tmp_mem.mm_node) {
330 		struct ttm_bo_global *glob = rdev->mman.bdev.glob;
331 
332 		spin_lock(&glob->lru_lock);
333 		drm_mm_put_block(tmp_mem.mm_node);
334 		spin_unlock(&glob->lru_lock);
	}
337 	return r;
338 }
339 
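/*
 * radeon_move_ram_vram - the reverse path: bind the pages into a
 * temporary GTT placement first, then blit GTT -> VRAM.
 */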
340 static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
341 				bool evict, bool interruptible,
342 				bool no_wait_reserve, bool no_wait_gpu,
343 				struct ttm_mem_reg *new_mem)
344 {
345 	struct radeon_device *rdev;
346 	struct ttm_mem_reg *old_mem = &bo->mem;
347 	struct ttm_mem_reg tmp_mem;
348 	struct ttm_placement placement;
349 	u32 placements;
350 	int r;
351 
352 	rdev = radeon_get_rdev(bo->bdev);
353 	tmp_mem = *new_mem;
354 	tmp_mem.mm_node = NULL;
355 	placement.fpfn = 0;
356 	placement.lpfn = 0;
357 	placement.num_placement = 1;
358 	placement.placement = &placements;
359 	placement.num_busy_placement = 1;
360 	placement.busy_placement = &placements;
361 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
362 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
363 	if (unlikely(r)) {
364 		return r;
365 	}
366 	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
367 	if (unlikely(r)) {
368 		goto out_cleanup;
369 	}
370 	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
374 out_cleanup:
375 	if (tmp_mem.mm_node) {
376 		struct ttm_bo_global *glob = rdev->mman.bdev.glob;
377 
378 		spin_lock(&glob->lru_lock);
379 		drm_mm_put_block(tmp_mem.mm_node);
380 		spin_unlock(&glob->lru_lock);
	}
383 	return r;
384 }
385 
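/*
 * radeon_bo_move - top-level TTM move hook.  Trivial moves (no
 * backing store yet, or SYSTEM <-> TT where a GART bind/unbind is
 * enough) complete in place; everything else is blitted when the CP
 * is up, with ttm_bo_move_memcpy() as the fallback whenever the GPU
 * cannot help or the blit path fails.
 */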
386 static int radeon_bo_move(struct ttm_buffer_object *bo,
387 			bool evict, bool interruptible,
388 			bool no_wait_reserve, bool no_wait_gpu,
389 			struct ttm_mem_reg *new_mem)
390 {
391 	struct radeon_device *rdev;
392 	struct ttm_mem_reg *old_mem = &bo->mem;
393 	int r;
394 
395 	rdev = radeon_get_rdev(bo->bdev);
396 	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
397 		radeon_move_null(bo, new_mem);
398 		return 0;
399 	}
400 	if ((old_mem->mem_type == TTM_PL_TT &&
401 	     new_mem->mem_type == TTM_PL_SYSTEM) ||
402 	    (old_mem->mem_type == TTM_PL_SYSTEM &&
403 	     new_mem->mem_type == TTM_PL_TT)) {
404 		/* bind is enough */
405 		radeon_move_null(bo, new_mem);
406 		return 0;
407 	}
408 	if (!rdev->cp.ready || rdev->asic->copy == NULL) {
409 		/* use memcpy */
410 		goto memcpy;
411 	}
412 
413 	if (old_mem->mem_type == TTM_PL_VRAM &&
414 	    new_mem->mem_type == TTM_PL_SYSTEM) {
415 		r = radeon_move_vram_ram(bo, evict, interruptible,
416 					no_wait_reserve, no_wait_gpu, new_mem);
417 	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
418 		   new_mem->mem_type == TTM_PL_VRAM) {
419 		r = radeon_move_ram_vram(bo, evict, interruptible,
420 					    no_wait_reserve, no_wait_gpu, new_mem);
421 	} else {
422 		r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
423 	}
424 
425 	if (r) {
426 memcpy:
427 		r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
428 	}
429 	return r;
430 }
431 
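/*
 * radeon_ttm_io_mem_reserve - fill in the bus address information TTM
 * needs to map a placement: nothing for system memory, the AGP
 * aperture for GTT when AGP is active, and the PCI aperture for VRAM,
 * rejecting anything beyond the CPU-visible part.
 */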
432 static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
433 {
434 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
435 	struct radeon_device *rdev = radeon_get_rdev(bdev);
436 
437 	mem->bus.addr = NULL;
438 	mem->bus.offset = 0;
439 	mem->bus.size = mem->num_pages << PAGE_SHIFT;
440 	mem->bus.base = 0;
441 	mem->bus.is_iomem = false;
442 	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
443 		return -EINVAL;
444 	switch (mem->mem_type) {
445 	case TTM_PL_SYSTEM:
446 		/* system memory */
447 		return 0;
448 	case TTM_PL_TT:
449 #if __OS_HAS_AGP
450 		if (rdev->flags & RADEON_IS_AGP) {
451 			/* RADEON_IS_AGP is set only if AGP is active */
452 			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
453 			mem->bus.base = rdev->mc.agp_base;
454 			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
455 		}
456 #endif
457 		break;
458 	case TTM_PL_VRAM:
459 		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
460 		/* check if it's visible */
461 		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
462 			return -EINVAL;
463 		mem->bus.base = rdev->mc.aper_base;
464 		mem->bus.is_iomem = true;
465 		break;
466 	default:
467 		return -EINVAL;
468 	}
469 	return 0;
470 }
471 
472 static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
473 {
474 }
475 
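/*
 * TTM sync object hooks: radeon fences are the driver's sync objects,
 * so these thunks just cast and forward to the fence API.
 */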
476 static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
477 				bool lazy, bool interruptible)
478 {
479 	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
480 }
481 
482 static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
483 {
484 	return 0;
485 }
486 
487 static void radeon_sync_obj_unref(void **sync_obj)
488 {
489 	radeon_fence_unref((struct radeon_fence **)sync_obj);
490 }
491 
492 static void *radeon_sync_obj_ref(void *sync_obj)
493 {
494 	return radeon_fence_ref((struct radeon_fence *)sync_obj);
495 }
496 
497 static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
498 {
499 	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
500 }
501 
502 static struct ttm_bo_driver radeon_bo_driver = {
503 	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
504 	.invalidate_caches = &radeon_invalidate_caches,
505 	.init_mem_type = &radeon_init_mem_type,
506 	.evict_flags = &radeon_evict_flags,
507 	.move = &radeon_bo_move,
508 	.verify_access = &radeon_verify_access,
509 	.sync_obj_signaled = &radeon_sync_obj_signaled,
510 	.sync_obj_wait = &radeon_sync_obj_wait,
511 	.sync_obj_flush = &radeon_sync_obj_flush,
512 	.sync_obj_unref = &radeon_sync_obj_unref,
513 	.sync_obj_ref = &radeon_sync_obj_ref,
514 	.move_notify = &radeon_bo_move_notify,
515 	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
516 	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
517 	.io_mem_free = &radeon_ttm_io_mem_free,
518 };
519 
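/*
 * radeon_ttm_init - bring up the memory manager: global TTM state,
 * the BO device, the VRAM and GTT heaps, a 256KB pinned VRAM buffer
 * (stollen_vga_memory), and the debugfs files.
 */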
520 int radeon_ttm_init(struct radeon_device *rdev)
521 {
522 	int r;
523 
524 	r = radeon_ttm_global_init(rdev);
525 	if (r) {
526 		return r;
527 	}
	/* No other users of the address space; BO mappings start at DRM_FILE_PAGE_OFFSET */
529 	r = ttm_bo_device_init(&rdev->mman.bdev,
530 			       rdev->mman.bo_global_ref.ref.object,
531 			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
532 			       rdev->need_dma32);
533 	if (r) {
534 		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
535 		return r;
536 	}
537 	rdev->mman.initialized = true;
538 	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
539 				rdev->mc.real_vram_size >> PAGE_SHIFT);
540 	if (r) {
541 		DRM_ERROR("Failed initializing VRAM heap.\n");
542 		return r;
543 	}
544 	r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
545 				RADEON_GEM_DOMAIN_VRAM,
546 				&rdev->stollen_vga_memory);
547 	if (r) {
548 		return r;
549 	}
550 	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
551 	if (r)
552 		return r;
553 	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
554 	radeon_bo_unreserve(rdev->stollen_vga_memory);
555 	if (r) {
556 		radeon_bo_unref(&rdev->stollen_vga_memory);
557 		return r;
558 	}
559 	DRM_INFO("radeon: %uM of VRAM memory ready\n",
560 		 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
561 	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
562 				rdev->mc.gtt_size >> PAGE_SHIFT);
563 	if (r) {
564 		DRM_ERROR("Failed initializing GTT heap.\n");
565 		return r;
566 	}
567 	DRM_INFO("radeon: %uM of GTT memory ready.\n",
568 		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
569 	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
570 		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
571 	}
572 
573 	r = radeon_ttm_debugfs_init(rdev);
574 	if (r) {
575 		DRM_ERROR("Failed to init debugfs\n");
576 		return r;
577 	}
578 	return 0;
579 }
580 
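/*
 * radeon_ttm_fini - tear everything down in reverse: unpin and free
 * the pinned VGA buffer, drain the VRAM and GTT heaps, release the
 * BO device, then drop the GART and the global TTM references.
 */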
581 void radeon_ttm_fini(struct radeon_device *rdev)
582 {
583 	int r;
584 
585 	if (!rdev->mman.initialized)
586 		return;
587 	if (rdev->stollen_vga_memory) {
588 		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
589 		if (r == 0) {
590 			radeon_bo_unpin(rdev->stollen_vga_memory);
591 			radeon_bo_unreserve(rdev->stollen_vga_memory);
592 		}
593 		radeon_bo_unref(&rdev->stollen_vga_memory);
594 	}
595 	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
596 	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
597 	ttm_bo_device_release(&rdev->mman.bdev);
598 	radeon_gart_fini(rdev);
599 	radeon_ttm_global_fini(rdev);
600 	rdev->mman.initialized = false;
601 	DRM_INFO("radeon: ttm finalized\n");
602 }
603 
604 static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;
606 
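/*
 * radeon_ttm_fault - wrapper around TTM's fault handler that takes
 * rdev->vram_mutex, serializing page faults on BO mappings against
 * other holders of the VRAM lock.
 */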
607 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
608 {
609 	struct ttm_buffer_object *bo;
610 	struct radeon_device *rdev;
611 	int r;
612 
613 	bo = (struct ttm_buffer_object *)vma->vm_private_data;
614 	if (bo == NULL) {
615 		return VM_FAULT_NOPAGE;
616 	}
617 	rdev = radeon_get_rdev(bo->bdev);
618 	mutex_lock(&rdev->vram_mutex);
619 	r = ttm_vm_ops->fault(vma, vmf);
620 	mutex_unlock(&rdev->vram_mutex);
621 	return r;
622 }
623 
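/*
 * radeon_mmap - route offsets below DRM_FILE_PAGE_OFFSET to the
 * legacy drm_mmap() path and everything else to TTM, substituting
 * radeon_ttm_fault() into a copy of TTM's vm_ops (captured on first
 * use) so our fault wrapper runs instead.
 */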
624 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
625 {
626 	struct drm_file *file_priv;
627 	struct radeon_device *rdev;
628 	int r;
629 
630 	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
631 		return drm_mmap(filp, vma);
632 	}
633 
634 	file_priv = (struct drm_file *)filp->private_data;
635 	rdev = file_priv->minor->dev->dev_private;
636 	if (rdev == NULL) {
637 		return -EINVAL;
638 	}
639 	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
640 	if (unlikely(r != 0)) {
641 		return r;
642 	}
643 	if (unlikely(ttm_vm_ops == NULL)) {
644 		ttm_vm_ops = vma->vm_ops;
645 		radeon_ttm_vm_ops = *ttm_vm_ops;
646 		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
647 	}
648 	vma->vm_ops = &radeon_ttm_vm_ops;
649 	return 0;
650 }
651 
652 
653 /*
654  * TTM backend functions.
655  */
656 struct radeon_ttm_backend {
657 	struct ttm_backend		backend;
658 	struct radeon_device		*rdev;
659 	unsigned long			num_pages;
660 	struct page			**pages;
661 	struct page			*dummy_read_page;
662 	bool				populated;
663 	bool				bound;
664 	unsigned			offset;
665 };
666 
667 static int radeon_ttm_backend_populate(struct ttm_backend *backend,
668 				       unsigned long num_pages,
669 				       struct page **pages,
670 				       struct page *dummy_read_page)
671 {
672 	struct radeon_ttm_backend *gtt;
673 
674 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
675 	gtt->pages = pages;
676 	gtt->num_pages = num_pages;
677 	gtt->dummy_read_page = dummy_read_page;
678 	gtt->populated = true;
679 	return 0;
680 }
681 
682 static void radeon_ttm_backend_clear(struct ttm_backend *backend)
683 {
684 	struct radeon_ttm_backend *gtt;
685 
686 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
687 	gtt->pages = NULL;
688 	gtt->num_pages = 0;
689 	gtt->dummy_read_page = NULL;
690 	gtt->populated = false;
691 	gtt->bound = false;
692 }
693 
694 
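/*
 * radeon_ttm_backend_bind - enter the backend's pages into the GART
 * at the page offset TTM assigned to this BO.
 */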
695 static int radeon_ttm_backend_bind(struct ttm_backend *backend,
696 				   struct ttm_mem_reg *bo_mem)
697 {
698 	struct radeon_ttm_backend *gtt;
699 	int r;
700 
701 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
702 	gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
	if (!gtt->num_pages) {
		WARN(1, "nothing to bind: %lu pages for mem_reg %p, backend %p!\n",
		     gtt->num_pages, bo_mem, backend);
	}
706 	r = radeon_gart_bind(gtt->rdev, gtt->offset,
707 			     gtt->num_pages, gtt->pages);
708 	if (r) {
709 		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
710 			  gtt->num_pages, gtt->offset);
711 		return r;
712 	}
713 	gtt->bound = true;
714 	return 0;
715 }
716 
717 static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
718 {
719 	struct radeon_ttm_backend *gtt;
720 
721 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
722 	radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
723 	gtt->bound = false;
724 	return 0;
725 }
726 
727 static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
728 {
729 	struct radeon_ttm_backend *gtt;
730 
731 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
732 	if (gtt->bound) {
733 		radeon_ttm_backend_unbind(backend);
734 	}
735 	kfree(gtt);
736 }
737 
738 static struct ttm_backend_func radeon_backend_func = {
739 	.populate = &radeon_ttm_backend_populate,
740 	.clear = &radeon_ttm_backend_clear,
741 	.bind = &radeon_ttm_backend_bind,
742 	.unbind = &radeon_ttm_backend_unbind,
743 	.destroy = &radeon_ttm_backend_destroy,
744 };
745 
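/*
 * radeon_ttm_backend_create - allocate the non-AGP GTT backend; the
 * page array is handed over later through the populate() hook.
 */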
746 struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
747 {
748 	struct radeon_ttm_backend *gtt;
749 
750 	gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
751 	if (gtt == NULL) {
752 		return NULL;
753 	}
754 	gtt->backend.bdev = &rdev->mman.bdev;
755 	gtt->backend.flags = 0;
756 	gtt->backend.func = &radeon_backend_func;
757 	gtt->rdev = rdev;
758 	gtt->pages = NULL;
759 	gtt->num_pages = 0;
760 	gtt->dummy_read_page = NULL;
761 	gtt->populated = false;
762 	gtt->bound = false;
763 	return &gtt->backend;
764 }
765 
766 #define RADEON_DEBUGFS_MEM_TYPES 2
767 
768 #if defined(CONFIG_DEBUG_FS)
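/*
 * radeon_mm_dump_table - debugfs show() hook that dumps one drm_mm
 * allocator (VRAM or GTT) under the global LRU lock.
 */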
769 static int radeon_mm_dump_table(struct seq_file *m, void *data)
770 {
771 	struct drm_info_node *node = (struct drm_info_node *)m->private;
772 	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
773 	struct drm_device *dev = node->minor->dev;
774 	struct radeon_device *rdev = dev->dev_private;
775 	int ret;
776 	struct ttm_bo_global *glob = rdev->mman.bdev.glob;
777 
778 	spin_lock(&glob->lru_lock);
779 	ret = drm_mm_dump_table(m, mm);
780 	spin_unlock(&glob->lru_lock);
781 	return ret;
782 }
783 #endif
784 
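/*
 * radeon_ttm_debugfs_init - register one debugfs file per drm_mm
 * (radeon_vram_mm, radeon_gtt_mm) plus one for the TTM page pool.
 */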
785 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
786 {
787 #if defined(CONFIG_DEBUG_FS)
788 	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
789 	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
790 	unsigned i;
791 
792 	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
793 		if (i == 0)
794 			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
795 		else
796 			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
797 		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
798 		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
799 		radeon_mem_types_list[i].driver_features = 0;
800 		if (i == 0)
801 			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
802 		else
803 			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
804 
805 	}
806 	/* Add ttm page pool to debugfs */
807 	sprintf(radeon_mem_types_names[i], "ttm_page_pool");
808 	radeon_mem_types_list[i].name = radeon_mem_types_names[i];
809 	radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
810 	radeon_mem_types_list[i].driver_features = 0;
811 	radeon_mem_types_list[i].data = NULL;
	return radeon_debugfs_add_files(rdev, radeon_mem_types_list,
					RADEON_DEBUGFS_MEM_TYPES + 1);
814 #endif
815 	return 0;
816 }
817