/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The
 * driver provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these
 * interfaces to create/destroy/set buffer objects which are then managed by
 * the kernel TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 *
 */
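
/*
 * A minimal sketch of the typical kernel-internal BO lifecycle built on these
 * interfaces (illustrative only; error handling trimmed, and the caller-side
 * variable names are assumptions, not taken from a real call site):
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *	// ... use gpu_addr from the GPU and cpu_addr from the CPU ...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
 */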

/**
 * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
 *
 * @bo: &amdgpu_bo buffer object
 *
 * This function is called when a BO stops being pinned, and updates the
 * &amdgpu_device pin_size values accordingly.
 */
static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->pin_count > 0)
		amdgpu_bo_subtract_pin_size(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	/* in case amdgpu_device_recover_vram got a NULL bo->parent */
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	amdgpu_bo_unref(&bo->parent);

	kfree(bo->metadata);
	kfree(bo);
}

/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if this
 * is an &amdgpu_bo.
 *
 * Returns:
 * true if the object is an &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &amdgpu_bo_destroy;
}

/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}
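
/*
 * Illustrative sketch of how a caller combines creation flags with this
 * helper (assumed caller context, not lifted from a real call site):
 *
 *	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 *	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
 *
 * With CPU_ACCESS_REQUIRED set, the VRAM placement above is clamped to the
 * CPU-visible window via places[c].lpfn = visible_pfn; without it, TOPDOWN
 * allocation keeps the BO out of the contended visible range.
 */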

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is created only if *@bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}
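
/*
 * Sketch of the intended pairing for amdgpu_bo_create_reserved(): the BO
 * comes back reserved so the caller can finish setup atomically, and must
 * unreserve it afterwards (illustrative; error handling trimmed and names
 * assumed):
 *
 *	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
 *				      AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				      &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *	// ... populate the mapping while the BO is still reserved ...
 *	amdgpu_bo_unreserve(bo);
 */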

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is created only if *@bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}
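
/*
 * Because a new BO is created only when *bo_ptr is NULL, the same call is
 * safe for "allocate once, then reuse" patterns (sketch with an assumed,
 * hypothetical cached pointer field; not a real amdgpu struct member):
 *
 *	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT,
 *				    &adev->foo_bo,	// hypothetical field
 *				    NULL, NULL);
 *
 * Passing NULL for @gpu_addr and @cpu_addr skips the offset query and the
 * CPU mapping, and the BO is then created without CPU access.
 */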

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

/* Validate that the BO size is smaller than the total size of the requested domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
					  unsigned long size, u32 domain)
{
	struct ttm_mem_type_manager *man = NULL;

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = &adev->mman.bdev.man[TTM_PL_TT];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = &adev->mman.bdev.man[TTM_PL_VRAM];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}

bool amdgpu_bo_support_uswc(u64 bo_flags)
{
#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = false,
		.resv = bp->resv,
		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	size_t acc_size;
	int r;

	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;
	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	bo->tbo.bdev = &adev->mman.bdev;
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, acc_size,
				 NULL, bp->resv, &amdgpu_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_bo_param bp;
	int r;

	if (bo->shadow)
		return 0;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		AMDGPU_GEM_CREATE_SHADOW;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.base.resv;

	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object and, if requested, also creates a
 * shadow object.
 * The shadow object is used to back up the original buffer object, and is
 * always placed in GTT.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	u64 flags = bp->flags;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
		if (!bp->resv)
			WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
							NULL));

		r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);

		if (!bp->resv)
			dma_resv_unlock((*bo_ptr)->tbo.base.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
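
/*
 * Minimal sketch of driving amdgpu_bo_create() directly via
 * &amdgpu_bo_param (illustrative values; not lifted from a real call site):
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = size;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 *	bp.flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
 *	bp.type = ttm_bo_type_device;
 *	bp.resv = NULL;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */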

/**
 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 * @bo: pointer to the buffer object
 *
 * Sets placement according to domain; and changes placement and caching
 * policy of the buffer object according to the placement.
 * This is used for validating shadow bos.  It calls ttm_bo_validate() to
 * make sure the buffer is resident where it needs to be.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 *
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a gpu
 * reset where vram context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t shadow_addr, parent_addr;

	shadow_addr = amdgpu_bo_gpu_offset(shadow);
	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
				  amdgpu_bo_size(shadow), NULL, fence,
				  true, false);
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}
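
/*
 * Sketch of the kmap/kunmap pairing; the mapping is cached in bo->kmap, so
 * repeated amdgpu_bo_kmap() calls are cheap (illustrative; error handling
 * trimmed and data/size are assumed caller-side names):
 *
 *	void *ptr;
 *	int r;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (r)
 *		return r;
 *	memcpy(ptr, data, size);
 *	amdgpu_bo_kunmap(bo);
 */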

/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound gart memory, binds the pages into gart table. Adjusts
 * pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer can not be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		bo->pin_count++;

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;

			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}
902 
903 /**
904  * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
905  * @bo: &amdgpu_bo buffer object to be pinned
906  * @domain: domain to be pinned to
907  *
908  * A simple wrapper to amdgpu_bo_pin_restricted().
909  * Provides a simpler API for buffers that do not have any strict restrictions
910  * on where a buffer must be located.
911  *
912  * Returns:
913  * 0 for success or a negative error code on failure.
914  */
915 int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
916 {
917 	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
918 }
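
/*
 * Pinning requires the BO to be reserved; a typical pin/unpin sequence looks
 * like this sketch (assumed caller context; error handling trimmed):
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	amdgpu_bo_unreserve(bo);
 *	// ... BO stays resident at a fixed offset, e.g. for scanout ...
 *	amdgpu_bo_reserve(bo, true);
 *	amdgpu_bo_unpin(bo);
 *	amdgpu_bo_unreserve(bo);
 */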

/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (WARN_ON_ONCE(!bo->pin_count)) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	amdgpu_bo_subtract_pin_size(bo);

	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r))
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);

	return r;
}

/**
 * amdgpu_bo_evict_vram - evict VRAM buffers
 * @adev: amdgpu device object
 *
 * Evicts all VRAM buffers on the lru list of the memory type.
 * Mainly used for evicting vram at suspend time.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
#ifndef CONFIG_HIBERNATION
	if (adev->flags & AMD_IS_APU) {
		/* Useless to evict on IGP chips */
		return 0;
	}
#endif
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
	"GDDR6",
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				   adev->gmc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
					      adev->gmc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_late_init - late init
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_late_init() to free resources used earlier during
 * initialization.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_late_init(struct amdgpu_device *adev)
{
	amdgpu_ttm_late_init(adev);

	return 0;
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->gmc.vram_mtrr);
	arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}

/**
 * amdgpu_bo_fbdev_mmap - mmap fbdev memory
 * @bo: &amdgpu_bo buffer object
 * @vma: vma as input from the fbdev mmap method
 *
 * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Replaces the buffer object's tiling flags with the new ones. Used by GEM
 * ioctl or kernel driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
 * query the tiling flags on a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}
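
/*
 * Sketch of the set/get metadata round trip as used from the GEM ioctl path
 * (illustrative buffer and sizes; not lifted from a real call site):
 *
 *	u8 blob[64];
 *	uint32_t out_size;
 *	uint64_t out_flags;
 *	int r;
 *
 *	r = amdgpu_bo_set_metadata(bo, blob, sizeof(blob), 0);
 *	// ... later, read it back; blob must hold at least metadata_size ...
 *	r = amdgpu_bo_get_metadata(bo, blob, sizeof(blob),
 *				   &out_size, &out_flags);
 */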

/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

/**
 * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Wipes VRAM buffers whose contents should not be leaked before the
 * memory is released.
 */
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *abo;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);

	if (abo->kfd_bo)
		amdgpu_amdkfd_unreserve_memory_limit(abo);

	if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
		return;

	dma_resv_lock(bo->base.resv, NULL);

	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
	if (!WARN_ON(r)) {
		amdgpu_bo_fence(abo, fence, false);
		dma_fence_put(fence);
	}

	dma_resv_unlock(bo->base.resv);
}

/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return 0;

	abo = ttm_to_amdgpu_bo(bo);

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r != 0))
		return r;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->gmc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;

	if (shared)
		dma_resv_add_shared_fence(resv, fence);
	else
		dma_resv_add_excl_fence(resv, fence);
}
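
/*
 * Sketch: after scheduling a GPU operation on the BO, attach the resulting
 * fence so later users wait for it, mirroring the clear-on-create path in
 * amdgpu_bo_do_create() above (illustrative; BO assumed reserved):
 *
 *	struct dma_fence *fence;
 *	int r;
 *
 *	r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
 *	if (!r) {
 *		amdgpu_bo_fence(bo, fence, false);
 *		dma_fence_put(fence);
 *	}
 */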

/**
 * amdgpu_bo_sync_wait - Wait for BO reservation fences
 *
 * @bo: buffer object
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
	r = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);

	return r;
}
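
/*
 * Sketch: block (interruptibly) until the relevant fences on the BO's
 * reservation object have signaled (illustrative; the owner constant is
 * assumed to come from amdgpu_sync.h):
 *
 *	r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_UNDEFINED, true);
 *	if (r)
 *		return r;
 */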

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Note: the object should be either pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
		     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_gmc_sign_extend(bo->tbo.offset);
}
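
/*
 * Sketch: querying the GPU address of an unpinned BO while holding its
 * reservation, which satisfies the WARN_ON_ONCE checks above (illustrative;
 * error handling trimmed):
 *
 *	u64 addr;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	addr = amdgpu_bo_gpu_offset(bo);
 *	amdgpu_bo_unreserve(bo);
 */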

/**
 * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for pinning the BO for scanout.
 */
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain)
{
	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}