/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/hmm.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/dma-buf.h>
#include <linux/sizes.h>

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>

#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "bif/bif_4_1_d.h"

#define AMDGPU_TTM_VRAM_MAX_DW_READ	(size_t)128

static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem);
static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
				      struct ttm_tt *ttm);

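/*
 * amdgpu_ttm_init_on_chip - Initialize a range manager for an on-chip heap
 *
 * Thin wrapper around ttm_range_man_init(), used during init for the GDS,
 * GWS and OA pools below.
 */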
static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
				    unsigned int type,
				    uint64_t size_in_page)
{
	return ttm_range_man_init(&adev->mman.bdev, type,
				  false, size_in_page);
}

/**
 * amdgpu_evict_flags - Compute placement flags
 *
 * @bo: The buffer object to evict
 * @placement: Possible destination(s) for the evicted BO
 *
 * Fill in placement data when ttm_bo_evict() is called
 */
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	};

	/* Don't handle scatter gather BOs */
	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}

	/* Object isn't an AMDGPU object so ignore */
	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
	switch (bo->mem.mem_type) {
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;

	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			/* Move to system memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_bo_in_cpu_visible_vram(abo)) {

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							 AMDGPU_GEM_DOMAIN_GTT);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
			/* Move to GTT memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		}
		break;
	case TTM_PL_TT:
	default:
		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		break;
	}
	*placement = abo->placement;
}

/**
 * amdgpu_verify_access - Verify access for a mmap call
 *
 * @bo:	The buffer object to map
 * @filp: The file pointer from the process performing the mmap
 *
 * This is called by ttm_bo_mmap() to verify whether a process
 * has the right to mmap a BO into its address space.
 */
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);

	/*
	 * Don't verify access for KFD BOs. They don't have a GEM
	 * object associated with them.
	 */
	if (abo->kfd_bo)
		return 0;

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
					  filp->private_data);
}

/**
 * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
 *
 * @bo: The bo to assign the memory to.
 * @mm_node: Memory manager node for drm allocator.
 * @mem: The region where the bo resides.
 *
 */
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
				    struct drm_mm_node *mm_node,
				    struct ttm_resource *mem)
{
	uint64_t addr = 0;

	if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
		addr = mm_node->start << PAGE_SHIFT;
		addr += amdgpu_ttm_domain_start(amdgpu_ttm_adev(bo->bdev),
						mem->mem_type);
	}
	return addr;
}

/**
 * amdgpu_find_mm_node - Helper function to find the drm_mm_node corresponding
 * to @offset. It also adjusts @offset to be relative to the returned
 * drm_mm_node.
 *
 * @mem: The region where the bo resides.
 * @offset: The offset used to find the drm_mm_node.
 *
 */
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
					       uint64_t *offset)
{
	struct drm_mm_node *mm_node = mem->mm_node;

	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
		*offset -= (mm_node->size << PAGE_SHIFT);
		++mm_node;
	}
	return mm_node;
}
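/*
 * Example (illustrative only): for an allocation split across two 16-page
 * drm_mm nodes, an input offset of 20 pages resolves to the second node,
 * with *offset rewritten to the remainder within that node:
 *
 *	uint64_t off = 20ULL << PAGE_SHIFT;
 *	struct drm_mm_node *node = amdgpu_find_mm_node(mem, &off);
 *	// node == &mem->mm_node[1], off == 4ULL << PAGE_SHIFT
 */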

/**
 * amdgpu_ttm_map_buffer - Map memory into the GART windows
 * @bo: buffer object to map
 * @mem: memory object to map
 * @mm_node: drm_mm node object to map
 * @num_pages: number of pages to map
 * @offset: offset into @mm_node where to start
 * @window: which GART window to use
 * @ring: DMA ring to use for the copy
 * @tmz: whether to set up a TMZ enabled mapping
 * @addr: resulting address inside the MC address space
 *
 * Set up one of the GART windows to access a specific piece of memory or
 * return the physical address for local memory.
 */
static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
				 struct ttm_resource *mem,
				 struct drm_mm_node *mm_node,
				 unsigned num_pages, uint64_t offset,
				 unsigned window, struct amdgpu_ring *ring,
				 bool tmz, uint64_t *addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	void *cpu_addr;
	uint64_t flags;
	unsigned int i;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	/* Map only what can't be accessed directly */
	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
		*addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
		return 0;
	}

	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;
	*addr += offset & ~PAGE_MASK;

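	/* Each GART PTE is 8 bytes, so the IB must carry num_pages * 8 bytes
	 * of page table payload in addition to the copy command dwords.
	 */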
	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = num_pages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

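	/* The IB holds the copy command followed by the new PTEs; the copy
	 * writes those PTEs into the GART table slot backing the chosen
	 * window.
	 */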
	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
	if (tmz)
		flags |= AMDGPU_PTE_TMZ;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	if (mem->mem_type == TTM_PL_TT) {
		dma_addr_t *dma_address;

		dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
		r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
				    cpu_addr);
		if (r)
			goto error_free;
	} else {
		dma_addr_t dma_address;

		dma_address = (mm_node->start << PAGE_SHIFT) + offset;
		dma_address += adev->vm_manager.vram_base_offset;

		for (i = 0; i < num_pages; ++i) {
			r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
					    &dma_address, flags, cpu_addr);
			if (r)
				goto error_free;

			dma_address += PAGE_SIZE;
		}
	}

	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 * @adev: amdgpu device
 * @src: buffer/address where to read from
 * @dst: buffer/address where to write to
 * @size: number of bytes to copy
 * @tmz: if a secure copy should be used
 * @resv: resv object to sync to
 * @f: Returns the last fence if multiple jobs are submitted.
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be the same BO for a
 * move, or different BOs for a BO-to-BO copy.
 *
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       const struct amdgpu_copy_mem *src,
			       const struct amdgpu_copy_mem *dst,
			       uint64_t size, bool tmz,
			       struct dma_resv *resv,
			       struct dma_fence **f)
{
	const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
					AMDGPU_GPU_PAGE_SIZE);

	uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct drm_mm_node *src_mm, *dst_mm;
	struct dma_fence *fence = NULL;
	int r = 0;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	src_offset = src->offset;
	if (src->mem->mm_node) {
		src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
		src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
	} else {
		src_mm = NULL;
		src_node_size = ULLONG_MAX;
	}

	dst_offset = dst->offset;
	if (dst->mem->mm_node) {
		dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
		dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
	} else {
		dst_mm = NULL;
		dst_node_size = ULLONG_MAX;
	}

	mutex_lock(&adev->mman.gtt_window_lock);

	while (size) {
		uint32_t src_page_offset = src_offset & ~PAGE_MASK;
		uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
		struct dma_fence *next;
		uint32_t cur_size;
		uint64_t from, to;

		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
		 * begins at an offset, then adjust the size accordingly
		 */
		cur_size = max(src_page_offset, dst_page_offset);
		cur_size = min(min3(src_node_size, dst_node_size, size),
			       (uint64_t)(GTT_MAX_BYTES - cur_size));

		/* Map src to window 0 and dst to window 1. */
		r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
					  PFN_UP(cur_size + src_page_offset),
					  src_offset, 0, ring, tmz, &from);
		if (r)
			goto error;

		r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
					  PFN_UP(cur_size + dst_page_offset),
					  dst_offset, 1, ring, tmz, &to);
		if (r)
			goto error;

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true, tmz);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		size -= cur_size;
		if (!size)
			break;

		src_node_size -= cur_size;
		if (!src_node_size) {
			++src_mm;
			src_node_size = src_mm->size << PAGE_SHIFT;
			src_offset = 0;
		} else {
			src_offset += cur_size;
		}

		dst_node_size -= cur_size;
		if (!dst_node_size) {
			++dst_mm;
			dst_node_size = dst_mm->size << PAGE_SHIFT;
			dst_offset = 0;
		} else {
			dst_offset += cur_size;
		}
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}

/*
 * amdgpu_move_blit - Copy an entire buffer to another buffer
 *
 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
 * help move buffers to and from VRAM.
 */
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_resource *new_mem,
			    struct ttm_resource *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->num_pages << PAGE_SHIFT,
				       amdgpu_bo_encrypted(abo),
				       bo->base.resv, &fence);
	if (r)
		goto error;

	/* clear the space being freed */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
		struct dma_fence *wipe_fence = NULL;

		r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
				       NULL, &wipe_fence);
		if (r) {
			goto error;
		} else if (wipe_fence) {
			dma_fence_put(fence);
			fence = wipe_fence;
		}
	}

	/* Always block for VM page tables before committing the new location */
	if (bo->type == ttm_bo_type_kernel)
		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
	else
		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/*
 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
 *
 * Called by amdgpu_bo_move()
 */
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
			       struct ttm_resource *mem)
{
	struct drm_mm_node *nodes = mem->mm_node;

	if (mem->mem_type == TTM_PL_SYSTEM ||
	    mem->mem_type == TTM_PL_TT)
		return true;
	if (mem->mem_type != TTM_PL_VRAM)
		return false;

	/* ttm_resource_ioremap only supports contiguous memory */
	if (nodes->size != mem->num_pages)
		return false;

	return ((nodes->start + nodes->size) << PAGE_SHIFT)
		<= adev->gmc.visible_vram_size;
}

/*
 * amdgpu_bo_move - Move a buffer object to a new memory location
 *
 * Called by ttm_bo_handle_move_mem()
 */
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_resource *new_mem,
			  struct ttm_place *hop)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_resource *old_mem = &bo->mem;
	int r;

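	/* Direct moves between SYSTEM and VRAM need a bounce through GTT;
	 * describe the intermediate placement in @hop and return -EMULTIHOP
	 * so TTM restarts the move with the extra hop.
	 */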
	if ((old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_VRAM) ||
	    (old_mem->mem_type == TTM_PL_VRAM &&
	     new_mem->mem_type == TTM_PL_SYSTEM)) {
		hop->fpfn = 0;
		hop->lpfn = 0;
		hop->mem_type = TTM_PL_TT;
		hop->flags = 0;
		return -EMULTIHOP;
	}

	if (new_mem->mem_type == TTM_PL_TT) {
		r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
		if (r)
			return r;
	}

	amdgpu_bo_move_notify(bo, evict, new_mem);

	/* Can't move a pinned BO */
	abo = ttm_to_amdgpu_bo(bo);
	if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}
	if (old_mem->mem_type == TTM_PL_SYSTEM &&
	    new_mem->mem_type == TTM_PL_TT) {
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	if (old_mem->mem_type == TTM_PL_TT &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = ttm_bo_wait_ctx(bo, ctx);
		if (r)
			goto fail;

		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->mem);
		ttm_bo_assign_mem(bo, new_mem);
		return 0;
	}

	if (old_mem->mem_type == AMDGPU_PL_GDS ||
	    old_mem->mem_type == AMDGPU_PL_GWS ||
	    old_mem->mem_type == AMDGPU_PL_OA ||
	    new_mem->mem_type == AMDGPU_PL_GDS ||
	    new_mem->mem_type == AMDGPU_PL_GWS ||
	    new_mem->mem_type == AMDGPU_PL_OA) {
		/* Nothing to save here */
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	if (!adev->mman.buffer_funcs_enabled) {
		r = -ENODEV;
		goto memcpy;
	}

	r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
	if (r) {
memcpy:
		/* Check that all memory is CPU accessible */
		if (!amdgpu_mem_visible(adev, old_mem) ||
		    !amdgpu_mem_visible(adev, new_mem)) {
			pr_err("Move buffer fallback to memcpy unavailable\n");
			goto fail;
		}

		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			goto fail;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
fail:
	swap(*new_mem, bo->mem);
	amdgpu_bo_move_notify(bo, false, new_mem);
	swap(*new_mem, bo->mem);
	return r;
}

/*
 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 *
 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 */
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct drm_mm_node *mm_node = mem->mm_node;
	size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
			return -EINVAL;
		/* Only physically contiguous buffers apply. In a contiguous
		 * buffer, size of the first mm_node would match the number of
		 * pages in ttm_resource.
		 */
		if (adev->mman.aper_base_kaddr &&
		    (mm_node->size == mem->num_pages))
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.offset += adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_write_combined;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	uint64_t offset = (page_offset << PAGE_SHIFT);
	struct drm_mm_node *mm;

	mm = amdgpu_find_mm_node(&bo->mem, &offset);
	offset += adev->gmc.aper_base;
	return mm->start + (offset >> PAGE_SHIFT);
}

/**
 * amdgpu_ttm_domain_start - Returns GPU start address
 * @adev: amdgpu device object
 * @type: type of the memory
 *
 * Returns:
 * GPU start address of a memory domain
 */

uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
{
	switch (type) {
	case TTM_PL_TT:
		return adev->gmc.gart_start;
	case TTM_PL_VRAM:
		return adev->gmc.vram_start;
	}

	return 0;
}

/*
 * TTM backend functions.
 */
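/*
 * struct amdgpu_ttm_tt - driver private TTM state for one BO
 *
 * Wraps the base ttm_tt and carries the backing GEM object, the GART
 * offset once bound, and the userptr bookkeeping (address, owning task,
 * flags and, with CONFIG_DRM_AMDGPU_USERPTR, the active HMM range).
 */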
struct amdgpu_ttm_tt {
	struct ttm_tt	ttm;
	struct drm_gem_object	*gobj;
	u64			offset;
	uint64_t		userptr;
	struct task_struct	*usertask;
	uint32_t		userflags;
	bool			bound;
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
	struct hmm_range	*range;
#endif
};

#ifdef CONFIG_DRM_AMDGPU_USERPTR
/*
 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 * memory and start HMM tracking of CPU page table updates
 *
 * The caller must call amdgpu_ttm_tt_get_user_pages_done() once and only
 * once afterwards to stop HMM tracking
 */
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
	struct ttm_tt *ttm = bo->tbo.ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned long start = gtt->userptr;
	struct vm_area_struct *vma;
	struct hmm_range *range;
	unsigned long timeout;
	struct mm_struct *mm;
	unsigned long i;
	int r = 0;

	mm = bo->notifier.mm;
	if (unlikely(!mm)) {
		DRM_DEBUG_DRIVER("BO is not registered?\n");
		return -EFAULT;
	}

	/* Another get_user_pages is running at the same time? */
	if (WARN_ON(gtt->range))
		return -EFAULT;

	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
		return -ESRCH;

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (unlikely(!range)) {
		r = -ENOMEM;
		goto out;
	}
	range->notifier = &bo->notifier;
	range->start = bo->notifier.interval_tree.start;
	range->end = bo->notifier.interval_tree.last + 1;
	range->default_flags = HMM_PFN_REQ_FAULT;
	if (!amdgpu_ttm_tt_is_readonly(ttm))
		range->default_flags |= HMM_PFN_REQ_WRITE;

	range->hmm_pfns = kvmalloc_array(ttm->num_pages,
					 sizeof(*range->hmm_pfns), GFP_KERNEL);
	if (unlikely(!range->hmm_pfns)) {
		r = -ENOMEM;
		goto out_free_ranges;
	}

	mmap_read_lock(mm);
	vma = find_vma(mm, start);
	if (unlikely(!vma || start < vma->vm_start)) {
		r = -EFAULT;
		goto out_unlock;
	}
	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
		vma->vm_file)) {
		r = -EPERM;
		goto out_unlock;
	}
	mmap_read_unlock(mm);
	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
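	/* mmu_interval_read_begin() samples a notifier sequence number; if
	 * the interval is invalidated while the pages are faulted in below,
	 * hmm_range_fault() returns -EBUSY and we retry until the timeout.
	 */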

retry:
	range->notifier_seq = mmu_interval_read_begin(&bo->notifier);

	mmap_read_lock(mm);
	r = hmm_range_fault(range);
	mmap_read_unlock(mm);
	if (unlikely(r)) {
		/*
		 * FIXME: This timeout should encompass the retry from
		 * mmu_interval_read_retry() as well.
		 */
		if (r == -EBUSY && !time_after(jiffies, timeout))
			goto retry;
		goto out_free_pfns;
	}

	/*
	 * Due to default_flags, all pages are HMM_PFN_VALID or
	 * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
	 * the notifier_lock, and mmu_interval_read_retry() must be done first.
	 */
	for (i = 0; i < ttm->num_pages; i++)
		pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);

	gtt->range = range;
	mmput(mm);

	return 0;

out_unlock:
	mmap_read_unlock(mm);
out_free_pfns:
	kvfree(range->hmm_pfns);
out_free_ranges:
	kfree(range);
out:
	mmput(mm);
	return r;
}

/*
 * amdgpu_ttm_tt_get_user_pages_done - stop HMM from tracking CPU page table
 * changes and check whether the pages backing this ttm range have been
 * invalidated
 *
 * Returns: true if pages are still valid
 */
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool r = false;

	if (!gtt || !gtt->userptr)
		return false;

	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
		gtt->userptr, ttm->num_pages);

	WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
		"No user pages to check\n");

	if (gtt->range) {
		/*
		 * FIXME: Must always hold notifier_lock for this, and must
		 * not ignore the return code.
		 */
		r = mmu_interval_read_retry(gtt->range->notifier,
					 gtt->range->notifier_seq);
		kvfree(gtt->range->hmm_pfns);
		kfree(gtt->range);
		gtt->range = NULL;
	}

	return !r;
}
#endif

/*
 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
 *
 * Called by amdgpu_cs_list_validate(). This creates the page list
 * that backs user memory and will ultimately be mapped into the device
 * address space.
 */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	unsigned long i;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i] = pages ? pages[i] : NULL;
}

/*
 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 *
 * Called by amdgpu_ttm_backend_bind()
 */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
				     struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* Allocate an SG array and squash pages into it */
	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	/* Map SG to device */
	r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
	if (r)
		goto release_sg;

	/* convert SG to linear array of pages and dma addresses */
	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;
	return r;
}

/*
 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 */
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
					struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* unmap the pages mapped to the device */
	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
	sg_free_table(ttm->sg);

#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
	if (gtt->range) {
		unsigned long i;

		for (i = 0; i < ttm->num_pages; i++) {
			if (ttm->pages[i] !=
			    hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
				break;
		}

		WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
	}
#endif
}

static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
				struct ttm_buffer_object *tbo,
				uint64_t flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
	struct ttm_tt *ttm = tbo->ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	if (amdgpu_bo_encrypted(abo))
		flags |= AMDGPU_PTE_TMZ;

	if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
		uint64_t page_idx = 1;

		r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
				ttm->pages, gtt->ttm.dma_address, flags);
		if (r)
			goto gart_bind_fail;

		/* The memory type of the first page defaults to UC. Now
		 * modify the memory type to NC from the second page of
		 * the BO onward.
		 */
		flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
		flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);

		r = amdgpu_gart_bind(adev,
				gtt->offset + (page_idx << PAGE_SHIFT),
				ttm->num_pages - page_idx,
				&ttm->pages[page_idx],
				&(gtt->ttm.dma_address[page_idx]), flags);
	} else {
		r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				     ttm->pages, gtt->ttm.dma_address, flags);
	}

gart_bind_fail:
	if (r)
		DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);

	return r;
}

/*
 * amdgpu_ttm_backend_bind - Bind GTT memory
 *
 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 * This handles binding GTT memory to the device address space.
 */
static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	uint64_t flags;
	int r = 0;

	if (!bo_mem)
		return -EINVAL;

	if (gtt->bound)
		return 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	/* compute PTE flags relevant to this BO memory */
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);

	/* bind pages into GART page tables */
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
		ttm->pages, gtt->ttm.dma_address, flags);

	if (r)
		DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);
	gtt->bound = true;
	return r;
}

/*
 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
 * through AGP or GART aperture.
 *
 * If bo is accessible through AGP aperture, then use AGP aperture
 * to access bo; otherwise allocate logical space in GART aperture
 * and map bo to GART aperture.
 */
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct ttm_resource tmp;
	struct ttm_placement placement;
	struct ttm_place placements;
	uint64_t addr, flags;
	int r;

	if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	addr = amdgpu_gmc_agp_addr(bo);
	if (addr != AMDGPU_BO_INVALID_OFFSET) {
		bo->mem.start = addr >> PAGE_SHIFT;
	} else {

		/* allocate GART space */
		tmp = bo->mem;
		tmp.mm_node = NULL;
		placement.num_placement = 1;
		placement.placement = &placements;
		placement.num_busy_placement = 1;
		placement.busy_placement = &placements;
		placements.fpfn = 0;
		placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
		placements.mem_type = TTM_PL_TT;
		placements.flags = bo->mem.placement;

		r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
		if (unlikely(r))
			return r;

		/* compute PTE flags for this buffer object */
		flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);

		/* Bind pages */
		gtt->offset = (u64)tmp.start << PAGE_SHIFT;
		r = amdgpu_ttm_gart_bind(adev, bo, flags);
		if (unlikely(r)) {
			ttm_resource_free(bo, &tmp);
			return r;
		}

		ttm_resource_free(bo, &bo->mem);
		bo->mem = tmp;
	}

	return 0;
}

/*
 * amdgpu_ttm_recover_gart - Rebind GTT pages
 *
 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
 * rebind GTT pages during a GPU reset.
 */
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	uint64_t flags;
	int r;

	if (!tbo->ttm)
		return 0;

	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
	r = amdgpu_ttm_gart_bind(adev, tbo, flags);

	return r;
}

/*
 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
 *
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
				      struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	if (!gtt->bound)
		return;

	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	if (r)
		DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
			  gtt->ttm.num_pages, gtt->offset);
	gtt->bound = false;
}

static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
				       struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	amdgpu_ttm_backend_unbind(bdev, ttm);
	ttm_tt_destroy_common(bdev, ttm);
	if (gtt->usertask)
		put_task_struct(gtt->usertask);

	ttm_tt_fini(&gtt->ttm);
	kfree(gtt);
}

/**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
 * @bo: The buffer object to create a GTT ttm_tt object around
 * @page_flags: Page flags to be added to the ttm_tt object
 *
 * Called by ttm_tt_create().
 */
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_ttm_tt *gtt;
	enum ttm_caching caching;

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->gobj = &bo->base;

	if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		caching = ttm_write_combined;
	else
		caching = ttm_cached;

	/* allocate space for the uninitialized page entries */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm;
}

/*
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
				  struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		return 0;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
		if (!ttm->sg) {
			struct dma_buf_attachment *attach;
			struct sg_table *sgt;

			attach = gtt->gobj->import_attach;
			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
			if (IS_ERR(sgt))
				return PTR_ERR(sgt);

			ttm->sg = sgt;
		}

		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address,
						 ttm->num_pages);
		return 0;
	}

	return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
}

/*
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
				     struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_device *adev;

	if (gtt && gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (ttm->sg && gtt->gobj->import_attach) {
		struct dma_buf_attachment *attach;

		attach = gtt->gobj->import_attach;
		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
		ttm->sg = NULL;
		return;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	adev = amdgpu_ttm_adev(bdev);
	return ttm_pool_free(&adev->mman.bdev.pool, ttm);
}

/**
 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
 * task
 *
 * @bo: The ttm_buffer_object to bind this userptr to
 * @addr:  The address in the current tasks VM space to use
 * @flags: Requirements of userptr object.
 *
 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
 * to current task
 */
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
			      uint64_t addr, uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt;

	if (!bo->ttm) {
		/* TODO: We want a separate TTM object type for userptrs */
		bo->ttm = amdgpu_ttm_tt_create(bo, 0);
		if (bo->ttm == NULL)
			return -ENOMEM;
	}

	gtt = (void *)bo->ttm;
	gtt->userptr = addr;
	gtt->userflags = flags;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);
	gtt->usertask = current->group_leader;
	get_task_struct(gtt->usertask);

	return 0;
}

/*
 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
 */
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return NULL;

	if (gtt->usertask == NULL)
		return NULL;

	return gtt->usertask->mm;
}

/*
 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
 * address range for the current task.
 *
 */
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	/* Return false if no part of the ttm_tt object lies within
	 * the range
	 */
	size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	return true;
}

/*
 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
 */
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL || !gtt->userptr)
		return false;

	return true;
}

/*
 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
 */
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

/**
 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PDE (Page Directory Entry).
 */
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching == ttm_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	return flags;
}
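/*
 * Example (illustrative only): a cache-coherent GTT mapping yields
 * AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED, while a VRAM
 * mapping only sets AMDGPU_PTE_VALID.
 */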

/**
 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
 *
 * @adev: amdgpu_device pointer
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PTE (Page Table Entry).
 */
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_resource *mem)
{
	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

/*
 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
 * object.
 *
 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
 * it can find space for a new object and by ttm_bo_force_list_clean() which is
 * used to clean out a memory space.
 */
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	unsigned long num_pages = bo->mem.num_pages;
	struct drm_mm_node *node = bo->mem.mm_node;
	struct dma_resv_list *flist;
	struct dma_fence *f;
	int i;

	if (bo->type == ttm_bo_type_kernel &&
	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
		return false;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	flist = dma_resv_get_list(bo->base.resv);
	if (flist) {
		for (i = 0; i < flist->shared_count; ++i) {
			f = rcu_dereference_protected(flist->shared[i],
				dma_resv_held(bo->base.resv));
			if (amdkfd_fence_check_mm(f, current->mm))
				return false;
		}
	}

	switch (bo->mem.mem_type) {
	case TTM_PL_TT:
		if (amdgpu_bo_is_amdgpu_bo(bo) &&
		    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
			return false;
		return true;

	case TTM_PL_VRAM:
		/* Check each drm MM node individually */
		while (num_pages) {
			if (place->fpfn < (node->start + node->size) &&
			    !(place->lpfn && place->lpfn <= node->start))
				return true;

			num_pages -= node->size;
			++node;
		}
		return false;

	default:
		break;
	}

	return ttm_bo_eviction_valuable(bo, place);
}

/**
 * amdgpu_ttm_access_memory - Read or write memory that backs a buffer object.
 *
 * @bo:  The buffer object to read/write
 * @offset:  Offset into buffer object
 * @buf:  Secondary buffer to write/read from
 * @len: Length in bytes of access
 * @write:  true if writing
 *
 * This is used to access VRAM that backs a buffer object via MMIO
 * access for debugging purposes.
 */
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset,
				    void *buf, int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct drm_mm_node *nodes;
	uint32_t value = 0;
	int ret = 0;
	uint64_t pos;
	unsigned long flags;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return -EIO;

	pos = offset;
	nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
	pos += (nodes->start << PAGE_SHIFT);

	while (len && pos < adev->gmc.mc_vram_size) {
		uint64_t aligned_pos = pos & ~(uint64_t)3;
		uint64_t bytes = 4 - (pos & 3);
		uint32_t shift = (pos & 3) * 8;
		uint32_t mask = 0xffffffff << shift;

		if (len < bytes) {
			mask &= 0xffffffff >> (bytes - len) * 8;
			bytes = len;
		}

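		/* Unaligned head/tail accesses do a read-modify-write of a
		 * single dword through the MM_INDEX/MM_DATA window; aligned
		 * spans in the middle go through amdgpu_device_vram_access()
		 * instead.
		 */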
		if (mask != 0xffffffff) {
			spin_lock_irqsave(&adev->mmio_idx_lock, flags);
			WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
			WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
			if (!write || mask != 0xffffffff)
				value = RREG32_NO_KIQ(mmMM_DATA);
			if (write) {
				value &= ~mask;
				value |= (*(uint32_t *)buf << shift) & mask;
				WREG32_NO_KIQ(mmMM_DATA, value);
			}
			spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
			if (!write) {
				value = (value & mask) >> shift;
				memcpy(buf, &value, bytes);
			}
		} else {
			bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
			bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);

			amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
						  bytes, write);
		}

		ret += bytes;
		buf = (uint8_t *)buf + bytes;
		pos += bytes;
		len -= bytes;
		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
			++nodes;
			pos = (nodes->start << PAGE_SHIFT);
		}
	}

	return ret;
}

static void
amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	amdgpu_bo_move_notify(bo, false, NULL);
}

static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
	.delete_mem_notify = &amdgpu_bo_delete_mem_notify,
	.release_notify = &amdgpu_bo_release_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory,
	.del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
};

/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free fw reserved vram if it has been reserved.
 */
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
		NULL, &adev->mman.fw_vram_usage_va);
}

/**
 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from fw.
 */
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	uint64_t vram_size = adev->gmc.visible_vram_size;

	adev->mman.fw_vram_usage_va = NULL;
	adev->mman.fw_vram_usage_reserved_bo = NULL;

	if (adev->mman.fw_vram_usage_size == 0 ||
	    adev->mman.fw_vram_usage_size > vram_size)
		return 0;

	return amdgpu_bo_create_kernel_at(adev,
					  adev->mman.fw_vram_usage_start_offset,
					  adev->mman.fw_vram_usage_size,
					  AMDGPU_GEM_DOMAIN_VRAM,
					  &adev->mman.fw_vram_usage_reserved_bo,
					  &adev->mman.fw_vram_usage_va);
}

/*
 * Memory training reservation functions
 */

/**
 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free memory training reserved vram if it has been reserved.
 */
static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
	ctx->c2p_bo = NULL;

	return 0;
}

static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	memset(ctx, 0, sizeof(*ctx));

	ctx->c2p_train_data_offset =
		ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
	ctx->p2c_train_data_offset =
		(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
	ctx->train_data_size =
		GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;

	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
			ctx->train_data_size,
			ctx->p2c_train_data_offset,
			ctx->c2p_train_data_offset);
}

/*
 * reserve TMR memory at the top of VRAM which holds
 * IP Discovery data and is protected by PSP.
 */
static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
{
	int ret;
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
	bool mem_train_support = false;

	if (!amdgpu_sriov_vf(adev)) {
		ret = amdgpu_mem_train_support(adev);
		if (ret == 1)
			mem_train_support = true;
		else if (ret == -1)
			return -EINVAL;
		else
			DRM_DEBUG("memory training is not supported!\n");
	}

	/*
	 * Query the reserved tmr size through atom firmwareinfo for Sienna_Cichlid and onwards
	 * for all the use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.)
	 *
	 * Otherwise, fall back to the legacy approach to check and reserve tmr blocks for ip
	 * discovery data and G6 memory training data respectively
	 */
	adev->mman.discovery_tmr_size =
		amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
	if (!adev->mman.discovery_tmr_size)
		adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;

	if (mem_train_support) {
		/* reserve vram for mem train according to TMR location */
		amdgpu_ttm_training_data_block_init(adev);
		ret = amdgpu_bo_create_kernel_at(adev,
					 ctx->c2p_train_data_offset,
					 ctx->train_data_size,
					 AMDGPU_GEM_DOMAIN_VRAM,
					 &ctx->c2p_bo,
					 NULL);
		if (ret) {
			DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
			amdgpu_ttm_training_reserve_vram_fini(adev);
			return ret;
		}
		ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
	}

	ret = amdgpu_bo_create_kernel_at(adev,
				adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
				adev->mman.discovery_tmr_size,
				AMDGPU_GEM_DOMAIN_VRAM,
				&adev->mman.discovery_memory,
				NULL);
	if (ret) {
		DRM_ERROR("alloc tmr failed(%d)!\n", ret);
		amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
		return ret;
	}

	return 0;
}

/*
 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
 * gtt/vram related fields.
 *
 * This initializes all of the memory space pools that the TTM layer
 * will need such as the GTT space (system memory mapped to the device),
 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
 * can be mapped per VMID.
 */
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;
	u64 vis_vram_limit;

	mutex_init(&adev->mman.gtt_window_lock);

	/* No other users of the address space, so set it to 0 */
	r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
			       adev_to_drm(adev)->anon_inode->i_mapping,
			       adev_to_drm(adev)->vma_offset_manager,
			       adev->need_swiotlb,
			       dma_addressing_limited(adev->dev));
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	adev->mman.initialized = true;

	/* Initialize VRAM pool with all of VRAM divided into pages */
	r = amdgpu_vram_mgr_init(adev);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}

	/* Reduce size of CPU-visible VRAM if requested */
	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
	if (amdgpu_vis_vram_limit > 0 &&
	    vis_vram_limit <= adev->gmc.visible_vram_size)
		adev->gmc.visible_vram_size = vis_vram_limit;

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
	adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
						adev->gmc.visible_vram_size);
#endif

	/*
	 * The reserved vram for firmware must be pinned to the specified
	 * place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_fw_reserve_vram_init(adev);
	if (r) {
		return r;
	}

	/*
	 * Only NAVI10 and onward ASICs support IP discovery.
	 * If IP discovery is enabled, a block of memory should be
	 * reserved for the IP discovery data.
	 */
	if (adev->mman.discovery_bin) {
		r = amdgpu_ttm_reserve_tmr(adev);
		if (r)
			return r;
	}

	/* allocate memory as required for VGA
	 * This is used for VGA emulation and pre-OS scanout buffers to
	 * avoid display artifacts while transitioning between pre-OS
	 * and driver.  */
	r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
				       AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->mman.stolen_vga_memory,
				       NULL);
	if (r)
		return r;
	r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
				       adev->mman.stolen_extended_size,
				       AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->mman.stolen_extended_memory,
				       NULL);
	if (r)
		return r;

	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));

	/* Compute GTT size, either based on 3/4 of the size of system RAM
	 * or whatever the user passed on module init */
	if (amdgpu_gtt_size == -1) {
		struct sysinfo si;

		si_meminfo(&si);
		gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
			       adev->gmc.mc_vram_size),
			       ((uint64_t)si.totalram * si.mem_unit * 3/4));
	} else {
		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
	}
1881 
1882 	/* Initialize GTT memory pool */
1883 	r = amdgpu_gtt_mgr_init(adev, gtt_size);
1884 	if (r) {
1885 		DRM_ERROR("Failed initializing GTT heap.\n");
1886 		return r;
1887 	}
1888 	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1889 		 (unsigned)(gtt_size / (1024 * 1024)));
1890 
1891 	/* Initialize various on-chip memory pools */
1892 	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
1893 	if (r) {
1894 		DRM_ERROR("Failed initializing GDS heap.\n");
1895 		return r;
1896 	}
1897 
1898 	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
1899 	if (r) {
1900 		DRM_ERROR("Failed initializing gws heap.\n");
1901 		return r;
1902 	}
1903 
1904 	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
1905 	if (r) {
1906 		DRM_ERROR("Failed initializing oa heap.\n");
1907 		return r;
1908 	}
1909 
1910 	return 0;
1911 }
1912 
1913 /*
1914  * amdgpu_ttm_fini - De-initialize the TTM memory pools
1915  */
1916 void amdgpu_ttm_fini(struct amdgpu_device *adev)
1917 {
1918 	if (!adev->mman.initialized)
1919 		return;
1920 
1921 	amdgpu_ttm_training_reserve_vram_fini(adev);
1922 	/* return the stolen vga memory back to VRAM */
1923 	amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
1924 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
1925 	/* return the IP Discovery TMR memory back to VRAM */
1926 	amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1927 	amdgpu_ttm_fw_reserve_vram_fini(adev);
1928 
1929 	if (adev->mman.aper_base_kaddr)
1930 		iounmap(adev->mman.aper_base_kaddr);
1931 	adev->mman.aper_base_kaddr = NULL;
1932 
1933 	amdgpu_vram_mgr_fini(adev);
1934 	amdgpu_gtt_mgr_fini(adev);
1935 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
1936 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
1937 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
1938 	ttm_bo_device_release(&adev->mman.bdev);
1939 	adev->mman.initialized = false;
1940 	DRM_INFO("amdgpu: ttm finalized\n");
1941 }
1942 
1943 /**
1944  * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
1945  *
1946  * @adev: amdgpu_device pointer
1947  * @enable: true when we can use buffer functions.
1948  *
1949  * Enable/disable use of buffer functions during suspend/resume. This should
1950  * only be called at bootup or when userspace isn't running.
1951  */
1952 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
1953 {
1954 	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
1955 	uint64_t size;
1956 	int r;
1957 
1958 	if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
1959 	    adev->mman.buffer_funcs_enabled == enable)
1960 		return;
1961 
1962 	if (enable) {
1963 		struct amdgpu_ring *ring;
1964 		struct drm_gpu_scheduler *sched;
1965 
1966 		ring = adev->mman.buffer_funcs_ring;
1967 		sched = &ring->sched;
1968 		r = drm_sched_entity_init(&adev->mman.entity,
1969 					  DRM_SCHED_PRIORITY_KERNEL, &sched,
1970 					  1, NULL);
1971 		if (r) {
1972 			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
1973 				  r);
1974 			return;
1975 		}
1976 	} else {
1977 		drm_sched_entity_destroy(&adev->mman.entity);
1978 		dma_fence_put(man->move);
1979 		man->move = NULL;
1980 	}
1981 
1982 	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
1983 	if (enable)
1984 		size = adev->gmc.real_vram_size;
1985 	else
1986 		size = adev->gmc.visible_vram_size;
1987 	man->size = size >> PAGE_SHIFT;
1988 	adev->mman.buffer_funcs_enabled = enable;
1989 }
1990 
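/**
 * amdgpu_ttm_fault - Handle a CPU fault on a TTM BO mapping
 *
 * @vmf: fault information
 *
 * Reserves the BO, lets amdgpu_bo_fault_reserve_notify() migrate it to a
 * CPU-accessible placement if needed, and then hands the actual fault
 * handling to TTM.
 */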
static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	ret = amdgpu_bo_fault_reserve_notify(bo);
	if (ret)
		goto unlock;

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT, 1);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

static const struct vm_operations_struct amdgpu_ttm_vm_ops = {
	.fault = amdgpu_ttm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

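/**
 * amdgpu_mmap - mmap operation for amdgpu DRM file descriptors
 *
 * @filp: DRM file pointer
 * @vma: VMA describing the userspace mapping
 *
 * Lets TTM set up the mapping and then installs amdgpu_ttm_vm_ops so
 * that faults are routed through amdgpu_ttm_fault() above.
 */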
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
	int r;

	r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
	if (unlikely(r != 0))
		return r;

	vma->vm_ops = &amdgpu_ttm_vm_ops;
	return 0;
}

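/**
 * amdgpu_copy_buffer - schedule an SDMA copy between two GPU addresses
 *
 * @ring: ring providing the buffer functions, normally
 *	  adev->mman.buffer_funcs_ring
 * @src_offset: source GPU address
 * @dst_offset: destination GPU address
 * @byte_count: number of bytes to copy
 * @resv: optional reservation object to sync the copy to
 * @fence: returned fence signaling completion of the copy
 * @direct_submit: submit directly to the ring, bypassing the scheduler
 * @vm_needs_flush: flush the VM before the copy
 * @tmz: treat the buffers as secure (TMZ)
 *
 * The copy is split into windows of at most copy_max_bytes each.  A
 * minimal usage sketch follows (illustrative only; src, dst, size and
 * resv are placeholders the caller must provide, with the usual
 * reservation rules observed):
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_copy_buffer(adev->mman.buffer_funcs_ring, src, dst,
 *			       size, resv, &fence, false, false, false);
 *	if (!r && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */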
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct dma_resv *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush, bool tmz)
{
	enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
		AMDGPU_IB_POOL_DELAYED;
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	if (direct_submit && !ring->sched.ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
	if (r)
		return r;

	if (vm_needs_flush) {
		job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
		job->vm_needs_flush = true;
	}
	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_SYNC_ALWAYS,
				     AMDGPU_FENCE_OWNER_UNDEFINED);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

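	/* Emit one copy packet per window of at most copy_max_bytes */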
	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes, tmz);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit)
		r = amdgpu_job_submit_direct(job, ring, fence);
	else
		r = amdgpu_job_submit(job, &adev->mman.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return r;

error_free:
	amdgpu_job_free(job);
	DRM_ERROR("Error scheduling IBs (%d)\n", r);
	return r;
}

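/**
 * amdgpu_fill_buffer - fill a buffer object with a 32-bit value
 *
 * @bo: the buffer object to fill
 * @src_data: the 32-bit fill pattern
 * @resv: optional reservation object to sync the fill to
 * @fence: returned fence signaling completion of the fill
 *
 * Walks the drm_mm nodes backing @bo and emits one or more fill packets
 * per node, each covering at most fill_max_bytes.
 */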
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct dma_resv *resv,
		       struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	struct drm_mm_node *mm_node;
	unsigned long num_pages;
	unsigned int num_loops, num_dw;

	struct amdgpu_job *job;
	int r;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to clear memory with ring turned off.\n");
		return -EINVAL;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		r = amdgpu_ttm_alloc_gart(&bo->tbo);
		if (r)
			return r;
	}

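	/* Walk the drm_mm nodes backing the BO to count how many fill
	 * windows, and thus how many packets, are needed.
	 */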
	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;
	num_loops = 0;
	while (num_pages) {
		uint64_t byte_count = mm_node->size << PAGE_SHIFT;

		num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
		num_pages -= mm_node->size;
		++mm_node;
	}
	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;

	/* for IB padding */
	num_dw += 64;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
				     &job);
	if (r)
		return r;

	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_SYNC_ALWAYS,
				     AMDGPU_FENCE_OWNER_UNDEFINED);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;

	while (num_pages) {
		uint64_t byte_count = mm_node->size << PAGE_SHIFT;
		uint64_t dst_addr;

		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
		while (byte_count) {
			uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
							   max_bytes);

			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
						dst_addr, cur_size_in_bytes);

			dst_addr += cur_size_in_bytes;
			byte_count -= cur_size_in_bytes;
		}

		num_pages -= mm_node->size;
		++mm_node;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

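/*
 * amdgpu_mm_dump_table - Dump the state of a memory manager via debugfs
 *
 * The TTM placement to dump is passed in the debugfs entry's data field.
 */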
static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = (uintptr_t)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl);
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

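/*
 * amdgpu_ttm_pool_debugfs - Dump the state of the TTM page pool via debugfs
 */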
static int amdgpu_ttm_pool_debugfs(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);

	return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
}

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
	{"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
	{"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
	{"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
	{"ttm_page_pool", amdgpu_ttm_pool_debugfs, 0, NULL},
};

/*
 * amdgpu_ttm_vram_read - Linear read access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
	while (size) {
		size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
		uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];

		amdgpu_device_vram_access(adev, *pos, value, bytes, false);
		if (copy_to_user(buf, value, bytes))
			return -EFAULT;

		result += bytes;
		buf += bytes;
		*pos += bytes;
		size -= bytes;
	}

	return result;
}

/*
 * amdgpu_ttm_vram_write - Linear write access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

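		/* Write one dword at *pos through the indirect
		 * MM_INDEX/MM_DATA register pair; the high bits of the
		 * VRAM offset go into MM_INDEX_HI.
		 */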
		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		WREG32_NO_KIQ(mmMM_DATA, value);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS

/*
 * amdgpu_ttm_gtt_read - Linear read access to GTT memory
 */
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(page);
		} else {
			r = clear_user(buf, cur_size);
		}

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

/*
 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
 *
 * This function is used to read memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	/* retrieve the IOMMU domain, if any, for this device */
	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		/* Translate the bus address to a physical address.  If
		 * the domain is NULL it means there is no IOMMU active
		 * and the address translation is the identity.
		 */
		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_to_user(buf, ptr + off, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

/*
 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
 *
 * This function is used to write memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_from_user(ptr + off, buf, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_iomem_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iomem_read,
	.write = amdgpu_iomem_write,
	.llseek = default_llseek
};

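/* debugfs files providing raw access to the different memory domains */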
static const struct {
	char *name;
	const struct file_operations *fops;
	int domain;
} ttm_debugfs_entries[] = {
	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
	{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
};

#endif

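/**
 * amdgpu_ttm_debugfs_init - Register the TTM debugfs entries
 *
 * @adev: amdgpu_device pointer
 *
 * Creates the raw-access files from ttm_debugfs_entries as well as the
 * memory manager dump tables, sizing the VRAM and GTT files to match
 * the actual memory sizes.
 */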
int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
		ent = debugfs_create_file(
				ttm_debugfs_entries[count].name,
				S_IFREG | S_IRUGO, root,
				adev,
				ttm_debugfs_entries[count].fops);
		if (IS_ERR(ent))
			return PTR_ERR(ent);
		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
			i_size_write(ent->d_inode, adev->gmc.gart_size);
		adev->mman.debugfs_entries[count] = ent;
	}

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
	return 0;
#endif
}