/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/iommu.h>
#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "bif/bif_4_1_d.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr);

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);

/*
 * Global memory.
 */
static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

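/**
 * amdgpu_ttm_global_init - Set up the global TTM state.
 *
 * References the global TTM memory and BO accounting objects (shared by
 * all devices) and initializes the scheduler entity that is later used
 * for buffer moves.
 */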
static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
	struct drm_global_reference *global_ref;
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	int r;

	adev->mman.mem_global_referenced = false;
	global_ref = &adev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &amdgpu_ttm_mem_global_init;
	global_ref->release = &amdgpu_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
		goto error_mem;
	}

	adev->mman.bo_global_ref.mem_glob =
		adev->mman.mem_global_ref.object;
	global_ref = &adev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		goto error_bo;
	}

	mutex_init(&adev->mman.gtt_window_lock);

	ring = adev->mman.buffer_funcs_ring;
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
				  rq, amdgpu_sched_jobs, NULL);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
		goto error_entity;
	}

	adev->mman.mem_global_referenced = true;

	return 0;

error_entity:
	drm_global_item_unref(&adev->mman.bo_global_ref.ref);
error_bo:
	drm_global_item_unref(&adev->mman.mem_global_ref);
error_mem:
	return r;
}

static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
	if (adev->mman.mem_global_referenced) {
		drm_sched_entity_fini(adev->mman.entity.sched,
				      &adev->mman.entity);
		mutex_destroy(&adev->mman.gtt_window_lock);
		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
		drm_global_item_unref(&adev->mman.mem_global_ref);
		adev->mman.mem_global_referenced = false;
	}
}

static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

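/**
 * amdgpu_init_mem_type - Describe one memory domain to TTM.
 *
 * Fills in the manager functions, caching options and placement flags for
 * the requested memory type (system, GTT, VRAM or the on-chip GDS/GWS/OA
 * domains).
 */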
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_ttm_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &amdgpu_gtt_mgr_func;
		man->gpu_offset = adev->gmc.gart_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &amdgpu_vram_mgr_func;
		man->gpu_offset = adev->gmc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS, GWS and OA memory */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

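/**
 * amdgpu_evict_flags - Compute placements for a BO that is being evicted.
 *
 * For VRAM BOs this prefers the CPU-inaccessible part of VRAM and only
 * lists GTT as the busy placement, so the evicted BO moves to GTT instead
 * of pushing other BOs out of VRAM.
 */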
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	abo = ttm_to_amdgpu_bo(bo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			struct drm_mm_node *node = bo->mem.mm_node;
			unsigned long pages_left;

			for (pages_left = bo->mem.num_pages;
			     pages_left;
			     pages_left -= node->size, node++) {
				if (node->start < fpfn)
					break;
			}

			if (!pages_left)
				goto gtt;

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							 AMDGPU_GEM_DOMAIN_GTT);
			abo->placements[0].fpfn = fpfn;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
gtt:
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		}
		break;
	case TTM_PL_TT:
	default:
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
	}
	*placement = abo->placement;
}

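/**
 * amdgpu_verify_access - Check whether a file handle may mmap a BO.
 *
 * KFD BOs have no GEM object and are not checked; userptr BOs may never
 * be mapped through the device file.
 */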
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);

	/*
	 * Don't verify access for KFD BOs. They don't have a GEM
	 * object associated with them.
	 */
	if (abo->kfd_bo)
		return 0;

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&abo->gem_base.vma_node,
					  filp->private_data);
}

static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

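/**
 * amdgpu_mm_node_addr - Compute the GPU address of a drm_mm_node.
 *
 * Returns 0 for GTT nodes that have not been assigned a GART address yet;
 * all other nodes get the node start plus the domain's GPU offset.
 */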
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
				    struct drm_mm_node *mm_node,
				    struct ttm_mem_reg *mem)
{
	uint64_t addr = 0;

	if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
		addr = mm_node->start << PAGE_SHIFT;
		addr += bo->bdev->man[mem->mem_type].gpu_offset;
	}
	return addr;
}

/**
 * amdgpu_find_mm_node - Helper function finds the drm_mm_node
 * corresponding to @offset. It also modifies the offset to be
 * within the drm_mm_node returned.
 */
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
					       unsigned long *offset)
{
	struct drm_mm_node *mm_node = mem->mm_node;

	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
		*offset -= (mm_node->size << PAGE_SHIFT);
		++mm_node;
	}
	return mm_node;
}

/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be the same BO for
 * a move and different for a BO to BO copy.
 *
 * @f: Returns the last fence if multiple jobs are submitted.
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       struct amdgpu_copy_mem *src,
			       struct amdgpu_copy_mem *dst,
			       uint64_t size,
			       struct reservation_object *resv,
			       struct dma_fence **f)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct drm_mm_node *src_mm, *dst_mm;
	uint64_t src_node_start, dst_node_start, src_node_size,
		 dst_node_size, src_page_offset, dst_page_offset;
	struct dma_fence *fence = NULL;
	int r = 0;
	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
					AMDGPU_GPU_PAGE_SIZE);

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
	src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
					     src->offset;
	src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
	src_page_offset = src_node_start & (PAGE_SIZE - 1);

	dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
	dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
					     dst->offset;
	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
	dst_page_offset = dst_node_start & (PAGE_SIZE - 1);

	mutex_lock(&adev->mman.gtt_window_lock);

	while (size) {
		unsigned long cur_size;
		uint64_t from = src_node_start, to = dst_node_start;
		struct dma_fence *next;

		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
		 * begins at an offset, then adjust the size accordingly
		 */
		cur_size = min3(min(src_node_size, dst_node_size), size,
				GTT_MAX_BYTES);
		if (cur_size + src_page_offset > GTT_MAX_BYTES ||
		    cur_size + dst_page_offset > GTT_MAX_BYTES)
			cur_size -= max(src_page_offset, dst_page_offset);

		/* Map only what needs to be accessed. Map src to window 0 and
		 * dst to window 1
		 */
		if (src->mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
			r = amdgpu_map_buffer(src->bo, src->mem,
					PFN_UP(cur_size + src_page_offset),
					src_node_start, 0, ring,
					&from);
			if (r)
				goto error;
			/* Adjust the offset because amdgpu_map_buffer returns
			 * start of mapped page
			 */
			from += src_page_offset;
		}

		if (dst->mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
			r = amdgpu_map_buffer(dst->bo, dst->mem,
					PFN_UP(cur_size + dst_page_offset),
					dst_node_start, 1, ring,
					&to);
			if (r)
				goto error;
			to += dst_page_offset;
		}

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		size -= cur_size;
		if (!size)
			break;

		src_node_size -= cur_size;
		if (!src_node_size) {
			src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
							     src->mem);
			src_node_size = (src_mm->size << PAGE_SHIFT);
		} else {
			src_node_start += cur_size;
			src_page_offset = src_node_start & (PAGE_SIZE - 1);
		}
		dst_node_size -= cur_size;
		if (!dst_node_size) {
			dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
							     dst->mem);
			dst_node_size = (dst_mm->size << PAGE_SHIFT);
		} else {
			dst_node_start += cur_size;
			dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
		}
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}

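/**
 * amdgpu_move_blit - Copy a whole BO to a new memory location using the
 * GPU, then hand the fence to ttm_bo_pipeline_move() so the old backing
 * store is released only once the copy has finished.
 */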
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->num_pages << PAGE_SHIFT,
				       bo->resv, &fence);
	if (r)
		goto error;

	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

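/**
 * amdgpu_move_vram_ram - Move a BO from VRAM to system memory.
 *
 * The BO is first blitted into a temporary GTT placement and the pages
 * are then moved to their final system memory destination by TTM.
 */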
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r))
		return r;

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r))
		goto out_cleanup;

	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
	if (unlikely(r))
		goto out_cleanup;

	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r))
		goto out_cleanup;

	r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

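/**
 * amdgpu_move_ram_vram - Move a BO from system memory to VRAM.
 *
 * The pages are first bound to a temporary GTT placement by TTM and the
 * BO is then blitted into its final VRAM destination.
 */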
static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r))
		return r;

	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
	if (unlikely(r))
		goto out_cleanup;

	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
	if (unlikely(r))
		goto out_cleanup;

out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

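/**
 * amdgpu_bo_move - TTM move callback.
 *
 * Picks between a null move (nothing bound yet, or a GTT<->system move
 * where rebinding is enough), a GPU blit and a CPU memcpy fallback,
 * depending on the old and new memory types.
 */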
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Can't move a pinned BO */
	abo = ttm_to_amdgpu_bo(bo);
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}

	if (!adev->mman.buffer_funcs_enabled)
		goto memcpy;

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}

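/**
 * amdgpu_ttm_io_mem_reserve - TTM callback that fills in the bus address
 * of a memory region before it is CPU-mapped; only VRAM inside the CPU
 * visible aperture is iomem here, and physically contiguous buffers get
 * a direct kernel mapping through aper_base_kaddr when available.
 */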
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct drm_mm_node *mm_node = mem->mm_node;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
			return -EINVAL;
		/* Only physically contiguous buffers apply. In a contiguous
		 * buffer, size of the first mm_node would match the number of
		 * pages in ttm_mem_reg.
		 */
		if (adev->mman.aper_base_kaddr &&
		    (mm_node->size == mem->num_pages))
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.base = adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct drm_mm_node *mm;
	unsigned long offset = (page_offset << PAGE_SHIFT);

	mm = amdgpu_find_mm_node(&bo->mem, &offset);
	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
		(offset >> PAGE_SHIFT);
}

/*
 * TTM backend functions.
 */
struct amdgpu_ttm_gup_task_list {
	struct list_head	list;
	struct task_struct	*task;
};

struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	u64			offset;
	uint64_t		userptr;
	struct mm_struct	*usermm;
	uint32_t		userflags;
	spinlock_t              guptasklock;
	struct list_head        guptasks;
	atomic_t		mmu_invalidations;
	uint32_t		last_set_pages;
};

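/**
 * amdgpu_ttm_tt_get_user_pages - Pin the pages backing a userptr BO.
 *
 * Wraps get_user_pages() and records the calling task in guptasks while
 * the call is in flight, so that MMU invalidations triggered by the task
 * itself can be recognized and ignored in amdgpu_ttm_tt_affect_userptr().
 */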
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned int flags = 0;
	unsigned pinned = 0;
	int r;

	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
		flags |= FOLL_WRITE;

	down_read(&current->mm->mmap_sem);

	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
		/* check that we only use anonymous memory
		 * to prevent problems with writeback
		 */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;

		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end) {
			up_read(&current->mm->mmap_sem);
			return -EPERM;
		}
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **p = pages + pinned;
		struct amdgpu_ttm_gup_task_list guptask;

		guptask.task = current;
		spin_lock(&gtt->guptasklock);
		list_add(&guptask.list, &gtt->guptasks);
		spin_unlock(&gtt->guptasklock);

		r = get_user_pages(userptr, num_pages, flags, p, NULL);

		spin_lock(&gtt->guptasklock);
		list_del(&guptask.list);
		spin_unlock(&gtt->guptasklock);

		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	up_read(&current->mm->mmap_sem);
	return 0;

release_pages:
	release_pages(pages, pinned);
	up_read(&current->mm->mmap_sem);
	return r;
}

void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i])
			put_page(ttm->pages[i]);

		ttm->pages[i] = pages ? pages[i] : NULL;
	}
}

void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		struct page *page = ttm->pages[i];

		if (!page)
			continue;

		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
	}
}

/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	return r;
}

static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	amdgpu_ttm_tt_mark_user_pages(ttm);

	sg_free_table(ttm->sg);
}

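/**
 * amdgpu_ttm_backend_bind - TTM backend bind callback.
 *
 * Pins userptr pages if needed and binds the pages into the GART. When
 * no GART address has been assigned yet, the offset is marked invalid
 * and the actual binding is deferred to amdgpu_ttm_alloc_gart().
 */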
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	uint64_t flags;
	int r = 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
		ttm->pages, gtt->ttm.dma_address, flags);

	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);
	return r;
}

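/**
 * amdgpu_ttm_alloc_gart - Make sure a GTT BO has a real GART address.
 *
 * Allocates GART space for BOs that were left at AMDGPU_BO_INVALID_OFFSET
 * by the backend bind above, binds the pages and updates bo->offset.
 */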
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct ttm_mem_reg tmp;
	struct ttm_placement placement;
	struct ttm_place placements;
	uint64_t flags;
	int r;

	if (bo->mem.mem_type != TTM_PL_TT ||
	    amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
		return 0;

	tmp = bo->mem;
	tmp.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
	placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
		TTM_PL_FLAG_TT;

	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
	if (unlikely(r))
		return r;

	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
	gtt->offset = (u64)tmp.start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages,
			     bo->ttm->pages, gtt->ttm.dma_address, flags);
	if (unlikely(r)) {
		ttm_bo_mem_put(bo, &tmp);
		return r;
	}

	ttm_bo_mem_put(bo, &bo->mem);
	bo->mem = tmp;
	bo->offset = (bo->mem.start << PAGE_SHIFT) +
		bo->bdev->man[bo->mem.mem_type].gpu_offset;

	return 0;
}

int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
	uint64_t flags;
	int r;

	if (!gtt)
		return 0;

	flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
	r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
			     gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}

static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(ttm);

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	if (r)
		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}

static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func amdgpu_backend_func = {
	.bind = &amdgpu_ttm_backend_bind,
	.unbind = &amdgpu_ttm_backend_unbind,
	.destroy = &amdgpu_ttm_backend_destroy,
};

static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt;

	adev = amdgpu_ttm_adev(bo->bdev);

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;

	gtt->ttm.ttm.func = &amdgpu_backend_func;
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

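/**
 * amdgpu_ttm_tt_populate - Allocate and map the backing pages of a ttm_tt.
 *
 * Userptr objects only get a bare sg_table here (the pages come from
 * get_user_pages()), dma-buf imports reuse the sg_table of the exporter,
 * and everything else is allocated through the TTM page pool, with a
 * swiotlb-aware path when bounce buffering may be needed.
 */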
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
			struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address,
						 ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl())
		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
#endif

	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
}

static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}

int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	spin_lock_init(&gtt->guptasklock);
	INIT_LIST_HEAD(&gtt->guptasks);
	atomic_set(&gtt->mmu_invalidations, 0);
	gtt->last_set_pages = 0;

	return 0;
}

struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return NULL;

	return gtt->usermm;
}

bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_ttm_gup_task_list *entry;
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	spin_lock(&gtt->guptasklock);
	list_for_each_entry(entry, &gtt->guptasks, list) {
		if (entry->task == current) {
			spin_unlock(&gtt->guptasklock);
			return false;
		}
	}
	spin_unlock(&gtt->guptasklock);

	atomic_inc(&gtt->mmu_invalidations);

	return true;
}

bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int prev_invalidated = *last_invalidated;

	*last_invalidated = atomic_read(&gtt->mmu_invalidations);
	return prev_invalidated != *last_invalidated;
}

bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL || !gtt->userptr)
		return false;

	return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
}

bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

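/**
 * amdgpu_ttm_tt_pte_flags - Compute the GART/VM PTE flags (valid, system,
 * snooped, readable, writeable) that match a ttm_tt object and its memory
 * placement.
 */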
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching_state == tt_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

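/**
 * amdgpu_ttm_bo_eviction_valuable - Decide whether it is worth evicting a
 * BO. BOs used by a KFD process are never evicted, and for VRAM the
 * placement is checked node by node so BOs that don't actually overlap
 * the requested range can be skipped.
 */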
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	unsigned long num_pages = bo->mem.num_pages;
	struct drm_mm_node *node = bo->mem.mm_node;
	struct reservation_object_list *flist;
	struct dma_fence *f;
	int i;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	flist = reservation_object_get_list(bo->resv);
	if (flist) {
		for (i = 0; i < flist->shared_count; ++i) {
			f = rcu_dereference_protected(flist->shared[i],
				reservation_object_held(bo->resv));
			if (amdkfd_fence_check_mm(f, current->mm))
				return false;
		}
	}

	switch (bo->mem.mem_type) {
	case TTM_PL_TT:
		return true;

	case TTM_PL_VRAM:
		/* Check each drm MM node individually */
		while (num_pages) {
			if (place->fpfn < (node->start + node->size) &&
			    !(place->lpfn && place->lpfn <= node->start))
				return true;

			num_pages -= node->size;
			++node;
		}
		return false;

	default:
		break;
	}

	return ttm_bo_eviction_valuable(bo, place);
}

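/**
 * amdgpu_ttm_access_memory - Read from or write to a VRAM BO through the
 * MM_INDEX/MM_DATA register aperture, handling unaligned offsets and BOs
 * that span multiple drm_mm nodes (used e.g. for ptrace access through
 * the TTM access_memory callback).
 */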
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset,
				    void *buf, int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct drm_mm_node *nodes;
	uint32_t value = 0;
	int ret = 0;
	uint64_t pos;
	unsigned long flags;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return -EIO;

	nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
	pos = (nodes->start << PAGE_SHIFT) + offset;

	while (len && pos < adev->gmc.mc_vram_size) {
		uint64_t aligned_pos = pos & ~(uint64_t)3;
		uint32_t bytes = 4 - (pos & 3);
		uint32_t shift = (pos & 3) * 8;
		uint32_t mask = 0xffffffff << shift;

		if (len < bytes) {
			mask &= 0xffffffff >> (bytes - len) * 8;
			bytes = len;
		}

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
		if (!write || mask != 0xffffffff)
			value = RREG32_NO_KIQ(mmMM_DATA);
		if (write) {
			value &= ~mask;
			value |= (*(uint32_t *)buf << shift) & mask;
			WREG32_NO_KIQ(mmMM_DATA, value);
		}
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
		if (!write) {
			value = (value & mask) >> shift;
			memcpy(buf, &value, bytes);
		}

		ret += bytes;
		buf = (uint8_t *)buf + bytes;
		pos += bytes;
		len -= bytes;
		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
			++nodes;
			pos = (nodes->start << PAGE_SHIFT);
		}
	}

	return ret;
}

static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.invalidate_caches = &amdgpu_invalidate_caches,
	.init_mem_type = &amdgpu_init_mem_type,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
	.move_notify = &amdgpu_bo_move_notify,
	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_free = &amdgpu_ttm_io_mem_free,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory
};

/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * Free the VRAM reserved for the firmware, if it has been reserved.
 */
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
		NULL, &adev->fw_vram_usage.va);
}

/**
 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * Create a BO covering the VRAM range that the firmware has asked us to
 * reserve, pin it at the requested offset and map it for CPU access.
 */
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	struct ttm_operation_ctx ctx = { false, false };
	int r = 0;
	int i;
	u64 vram_size = adev->gmc.visible_vram_size;
	u64 offset = adev->fw_vram_usage.start_offset;
	u64 size = adev->fw_vram_usage.size;
	struct amdgpu_bo *bo;

	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;

	if (adev->fw_vram_usage.size > 0 &&
		adev->fw_vram_usage.size <= vram_size) {

		r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     ttm_bo_type_kernel, NULL,
				     &adev->fw_vram_usage.reserved_bo);
		if (r)
			goto error_create;

		r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
		if (r)
			goto error_reserve;

		/* remove the original mem node and create a new one at the
		 * request position
		 */
		bo = adev->fw_vram_usage.reserved_bo;
		offset = ALIGN(offset, PAGE_SIZE);
		for (i = 0; i < bo->placement.num_placement; ++i) {
			bo->placements[i].fpfn = offset >> PAGE_SHIFT;
			bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
		}

		ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
		r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
				     &bo->tbo.mem, &ctx);
		if (r)
			goto error_pin;

		r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
			AMDGPU_GEM_DOMAIN_VRAM,
			adev->fw_vram_usage.start_offset,
			(adev->fw_vram_usage.start_offset +
			adev->fw_vram_usage.size), NULL);
		if (r)
			goto error_pin;
		r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
			&adev->fw_vram_usage.va);
		if (r)
			goto error_kmap;

		amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
	}
	return r;

error_kmap:
	amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
error_pin:
	amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
error_reserve:
	amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
error_create:
	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;
	return r;
}

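/**
 * amdgpu_ttm_init - Initialize the memory manager.
 *
 * Sets up the TTM device, the VRAM and GTT heaps, the firmware and stolen
 * VGA reservations and the GDS/GWS/OA domains, and creates the debugfs
 * entries.
 */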
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;
	u64 vis_vram_limit;

	r = amdgpu_ttm_global_init(adev);
	if (r)
		return r;

	/* No other users of the address space, so set the offset to 0 */
	r = ttm_bo_device_init(&adev->mman.bdev,
			       adev->mman.bo_global_ref.ref.object,
			       &amdgpu_bo_driver,
			       adev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET,
			       adev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	adev->mman.initialized = true;

	/* We opt to avoid OOM on system pages allocations */
	adev->mman.bdev.no_retry = true;

	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
				adev->gmc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}

	/* Reduce size of CPU-visible VRAM if requested */
	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
	if (amdgpu_vis_vram_limit > 0 &&
	    vis_vram_limit <= adev->gmc.visible_vram_size)
		adev->gmc.visible_vram_size = vis_vram_limit;

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
	adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
						adev->gmc.visible_vram_size);
#endif

	/*
	 * The reserved vram for firmware must be pinned to the specified
	 * place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_fw_reserve_vram_init(adev);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->stolen_vga_memory,
				    NULL, NULL);
	if (r)
		return r;
	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));

	if (amdgpu_gtt_size == -1) {
		struct sysinfo si;

		si_meminfo(&si);
		gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
			       adev->gmc.mc_vram_size),
			       ((uint64_t)si.totalram * si.mem_unit * 3/4));
	} else {
		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
	}
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
		 (unsigned)(gtt_size / (1024 * 1024)));

	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
	/* GDS Memory */
	if (adev->gds.mem.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
				   adev->gds.mem.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing GDS heap.\n");
			return r;
		}
	}

	/* GWS */
	if (adev->gds.gws.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
				   adev->gds.gws.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing gws heap.\n");
			return r;
		}
	}

	/* OA */
	if (adev->gds.oa.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
				   adev->gds.oa.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing oa heap.\n");
			return r;
		}
	}

	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	if (!adev->mman.initialized)
		return;

	amdgpu_ttm_debugfs_fini(adev);
	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
	amdgpu_ttm_fw_reserve_vram_fini(adev);
	if (adev->mman.aper_base_kaddr)
		iounmap(adev->mman.aper_base_kaddr);
	adev->mman.aper_base_kaddr = NULL;

	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
	if (adev->gds.mem.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
	if (adev->gds.gws.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
	if (adev->gds.oa.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_bo_device_release(&adev->mman.bdev);
	amdgpu_ttm_global_fini(adev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}

/**
 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
 *
 * @adev: amdgpu_device pointer
 * @enable: true when we can use buffer functions.
 *
 * Enable/disable use of buffer functions during suspend/resume. This should
 * only be called at bootup or when userspace isn't running.
 */
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
	uint64_t size;

	if (!adev->mman.initialized || adev->in_gpu_reset)
		return;

	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	if (enable)
		size = adev->gmc.real_vram_size;
	else
		size = adev->gmc.visible_vram_size;
	man->size = size >> PAGE_SHIFT;
	adev->mman.buffer_funcs_enabled = enable;
}

int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct amdgpu_device *adev;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	adev = file_priv->minor->dev->dev_private;
	if (adev == NULL)
		return -EINVAL;

	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}

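/**
 * amdgpu_map_buffer - Map @num_pages of a BO into one of the two GTT
 * transfer windows so it can be used as the source or destination of an
 * SDMA copy; the GPU address of the window is returned in @addr. The IB
 * that carries the copy also carries the PTE update for the window.
 */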
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr)
{
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct amdgpu_device *adev = ring->adev;
	struct ttm_tt *ttm = bo->ttm;
	struct amdgpu_job *job;
	unsigned num_dw, num_bytes;
	dma_addr_t *dma_address;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t flags;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;

	num_dw = adev->mman.buffer_funcs->copy_num_dw;
	while (num_dw & 0x7)
		num_dw++;

	num_bytes = num_pages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = adev->gart.table_addr;
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
			    &job->ibs[0].ptr[num_dw]);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

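/**
 * amdgpu_copy_buffer - Schedule an SDMA copy of @byte_count bytes between
 * two GPU addresses, split into copy_max_bytes sized chunks, optionally
 * syncing to a reservation object first. With @direct_submit the IB is
 * executed immediately on the ring instead of going through the
 * scheduler.
 */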
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct reservation_object *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	if (direct_submit && !ring->ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;

	/* for IB padding */
	while (num_dw & 0x7)
		num_dw++;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	job->vm_needs_flush = vm_needs_flush;
	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit) {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
				       NULL, fence);
		job->fence = dma_fence_get(*fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->mman.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
		if (r)
			goto error_free;
	}

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

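/**
 * amdgpu_fill_buffer - Fill a BO with a 32-bit value using SDMA, walking
 * the drm_mm nodes of its placement one by one. GTT BOs are bound to the
 * GART first so every node has a valid GPU address.
 */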
1741 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
1742 		       uint32_t src_data,
1743 		       struct reservation_object *resv,
1744 		       struct dma_fence **fence)
1745 {
1746 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1747 	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
1748 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
1749 
1750 	struct drm_mm_node *mm_node;
1751 	unsigned long num_pages;
1752 	unsigned int num_loops, num_dw;
1753 
1754 	struct amdgpu_job *job;
1755 	int r;
1756 
1757 	if (!adev->mman.buffer_funcs_enabled) {
1758 		DRM_ERROR("Trying to clear memory with ring turned off.\n");
1759 		return -EINVAL;
1760 	}
1761 
1762 	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
1763 		r = amdgpu_ttm_alloc_gart(&bo->tbo);
1764 		if (r)
1765 			return r;
1766 	}
1767 
1768 	num_pages = bo->tbo.num_pages;
1769 	mm_node = bo->tbo.mem.mm_node;
1770 	num_loops = 0;
1771 	while (num_pages) {
1772 		uint32_t byte_count = mm_node->size << PAGE_SHIFT;
1773 
1774 		num_loops += DIV_ROUND_UP(byte_count, max_bytes);
1775 		num_pages -= mm_node->size;
1776 		++mm_node;
1777 	}
1778 	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
1779 
1780 	/* for IB padding */
1781 	num_dw += 64;
1782 
1783 	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
1784 	if (r)
1785 		return r;
1786 
1787 	if (resv) {
1788 		r = amdgpu_sync_resv(adev, &job->sync, resv,
1789 				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
1790 		if (r) {
1791 			DRM_ERROR("sync failed (%d).\n", r);
1792 			goto error_free;
1793 		}
1794 	}
1795 
1796 	num_pages = bo->tbo.num_pages;
1797 	mm_node = bo->tbo.mem.mm_node;
1798 
1799 	while (num_pages) {
1800 		uint32_t byte_count = mm_node->size << PAGE_SHIFT;
1801 		uint64_t dst_addr;
1802 
1803 		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
1804 		while (byte_count) {
1805 			uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
1806 
1807 			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
1808 						dst_addr, cur_size_in_bytes);
1809 
1810 			dst_addr += cur_size_in_bytes;
1811 			byte_count -= cur_size_in_bytes;
1812 		}
1813 
1814 		num_pages -= mm_node->size;
1815 		++mm_node;
1816 	}
1817 
1818 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
1819 	WARN_ON(job->ibs[0].length_dw > num_dw);
1820 	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
1821 			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
1822 	if (r)
1823 		goto error_free;
1824 
1825 	return 0;
1826 
1827 error_free:
1828 	amdgpu_job_free(job);
1829 	return r;
1830 }
1831 
1832 #if defined(CONFIG_DEBUG_FS)
1833 
static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

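/**
 * amdgpu_ttm_vram_read - debugfs read access to raw VRAM contents
 *
 * Reads VRAM dword by dword through the MM_INDEX/MM_DATA register
 * window, which also works for memory the CPU cannot reach through
 * the BAR. Offset and size must be dword aligned.
 */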
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

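		/*
		 * Program the indirect register window to the current byte
		 * offset; bit 31 of MM_INDEX presumably enables the extended
		 * aperture, so MM_INDEX carries the low 31 address bits and
		 * MM_INDEX_HI the remainder.
		 */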
		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		value = RREG32_NO_KIQ(mmMM_DATA);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t __user *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

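/**
 * amdgpu_ttm_vram_write - debugfs write access to raw VRAM contents
 *
 * Write counterpart of amdgpu_ttm_vram_read, using the same register
 * window and the same dword alignment rules.
 */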
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t __user *)buf);
		if (r)
			return r;

		/* Same MM_INDEX/MM_DATA window as the read path above. */
		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		WREG32_NO_KIQ(mmMM_DATA, value);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS

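/**
 * amdgpu_ttm_gtt_read - debugfs read access to the pages bound to the GART
 *
 * Copies out the contents of the CPU pages currently backing the GART;
 * unpopulated entries read back as zeros.
 */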
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(page);
		} else {
			r = clear_user(buf, cur_size);
		}

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

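/**
 * amdgpu_iomem_read - debugfs read access by DMA (bus) address
 *
 * Treats the file offset as an address in the device's DMA address
 * space, translating it back through the IOMMU when one is present.
 * Only pages that belong to this device's TTM mappings may be read.
 */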
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = min(bytes, size);

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_to_user(buf, ptr + off, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		/* Advance the user buffer as well, so chunks that span
		 * page boundaries do not overwrite each other.
		 */
		buf += bytes;
		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

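/**
 * amdgpu_iomem_write - debugfs write access by DMA (bus) address
 *
 * Write counterpart of amdgpu_iomem_read, with the same IOMMU
 * translation and page ownership checks.
 */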
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
				  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = min(bytes, size);

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_from_user(ptr + off, buf, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		/* Advance the user buffer as well, matching the read path. */
		buf += bytes;
		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_iomem_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iomem_read,
	.write = amdgpu_iomem_write,
	.llseek = default_llseek
};

static const struct {
	char *name;
	const struct file_operations *fops;
	int domain;
} ttm_debugfs_entries[] = {
	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
	{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
};

#endif

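/**
 * amdgpu_ttm_debugfs_init - create the TTM debugfs entries
 *
 * Registers the raw access files (amdgpu_vram, amdgpu_gtt, amdgpu_iomem)
 * and the memory manager dump tables. The VRAM and GTT file sizes are
 * set to the real aperture sizes so tools can seek within them.
 */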
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
		ent = debugfs_create_file(
				ttm_debugfs_entries[count].name,
				S_IFREG | S_IRUGO, root,
				adev,
				ttm_debugfs_entries[count].fops);
		if (IS_ERR(ent))
			return PTR_ERR(ent);
		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
			i_size_write(ent->d_inode, adev->gmc.gart_size);
		adev->mman.debugfs_entries[count] = ent;
	}

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	/* Skip the trailing ttm_dma_page_pool entry when swiotlb is unused. */
	if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
	return 0;
#endif
}

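/**
 * amdgpu_ttm_debugfs_fini - remove the debugfs entries created by
 * amdgpu_ttm_debugfs_init
 */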
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
		debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}