/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/dma-mapping.h>
#include <drm/ttm/ttm_range_manager.h>

#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"

struct amdgpu_vram_reservation {
	u64 start;
	u64 size;
	struct list_head allocated;
	struct list_head blocks;
};

static inline struct amdgpu_vram_mgr *
to_vram_mgr(struct ttm_resource_manager *man)
{
	return container_of(man, struct amdgpu_vram_mgr, manager);
}

static inline struct amdgpu_device *
to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
{
	return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
}

/**
 * DOC: mem_info_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * VRAM available on the device.
 * The file mem_info_vram_total is used for this and returns the total
 * amount of VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size);
}

/**
 * DOC: mem_info_vis_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * CPU-visible VRAM available on the device.
 * The file mem_info_vis_vram_total is used for this and returns the total
 * amount of visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size);
}

/**
 * DOC: mem_info_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of VRAM
 * currently in use on the device.
 * The file mem_info_vram_used is used for this and returns the total
 * amount of currently used VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct ttm_resource_manager *man = &adev->mman.vram_mgr.manager;

	return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
}

/**
 * DOC: mem_info_vis_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of
 * CPU-visible VRAM currently in use.
 * The file mem_info_vis_vram_used is used for this and returns the total
 * amount of currently used visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
						  struct device_attribute *attr,
						  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n",
			  amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr));
}

/**
 * DOC: mem_info_vram_vendor
 *
 * The amdgpu driver provides a sysfs API for reporting the vendor of the
 * installed VRAM.
 * The file mem_info_vram_vendor is used for this and returns the name of
 * the vendor.
 */
static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	switch (adev->gmc.vram_vendor) {
	case SAMSUNG:
		return sysfs_emit(buf, "samsung\n");
	case INFINEON:
		return sysfs_emit(buf, "infineon\n");
	case ELPIDA:
		return sysfs_emit(buf, "elpida\n");
	case ETRON:
		return sysfs_emit(buf, "etron\n");
	case NANYA:
		return sysfs_emit(buf, "nanya\n");
	case HYNIX:
		return sysfs_emit(buf, "hynix\n");
	case MOSEL:
		return sysfs_emit(buf, "mosel\n");
	case WINBOND:
		return sysfs_emit(buf, "winbond\n");
	case ESMT:
		return sysfs_emit(buf, "esmt\n");
	case MICRON:
		return sysfs_emit(buf, "micron\n");
	default:
		return sysfs_emit(buf, "unknown\n");
	}
}

static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
		   amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
		   amdgpu_mem_info_vis_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
		   amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
		   amdgpu_mem_info_vis_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
		   amdgpu_mem_info_vram_vendor, NULL);

static struct attribute *amdgpu_vram_mgr_attributes[] = {
	&dev_attr_mem_info_vram_total.attr,
	&dev_attr_mem_info_vis_vram_total.attr,
	&dev_attr_mem_info_vram_used.attr,
	&dev_attr_mem_info_vis_vram_used.attr,
	&dev_attr_mem_info_vram_vendor.attr,
	NULL
};

const struct attribute_group amdgpu_vram_mgr_attr_group = {
	.attrs = amdgpu_vram_mgr_attributes
};
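
/*
 * Usage note (illustrative): these attributes live in the DRM device's
 * sysfs directory, so on a typical system the counters can be read with a
 * plain file read ("card0" is an assumption, substitute the right card
 * index), e.g.:
 *
 *	cat /sys/class/drm/card0/device/mem_info_vram_total
 *	cat /sys/class/drm/card0/device/mem_info_vis_vram_used
 *
 * Each file returns a single value in bytes, matching the sysfs_emit()
 * calls above.
 */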

/**
 * amdgpu_vram_mgr_vis_size - Calculate visible block size
 *
 * @adev: amdgpu_device pointer
 * @block: DRM BUDDY block structure
 *
 * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM
 */
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
				    struct drm_buddy_block *block)
{
	u64 start = amdgpu_vram_mgr_block_start(block);
	u64 end = start + amdgpu_vram_mgr_block_size(block);

	if (start >= adev->gmc.visible_vram_size)
		return 0;

	return (end > adev->gmc.visible_vram_size ?
		adev->gmc.visible_vram_size : end) - start;
}
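
/*
 * Worked example (illustrative numbers): with visible_vram_size = 256 MiB,
 * a block spanning [255 MiB, 259 MiB) overlaps the visible window by 1 MiB,
 * so 1 MiB is returned; a block starting at or beyond 256 MiB returns 0.
 */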

/**
 * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
 *
 * @bo: &amdgpu_bo buffer object (must be in VRAM)
 *
 * Returns:
 * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
 */
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_resource *res = bo->tbo.resource;
	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
	struct drm_buddy_block *block;
	u64 usage = 0;

	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
		return amdgpu_bo_size(bo);

	if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
		return 0;

	list_for_each_entry(block, &vres->blocks, link)
		usage += amdgpu_vram_mgr_vis_size(adev, block);

	return usage;
}

/* Commit the reservation of VRAM pages */
static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_buddy *mm = &mgr->mm;
	struct amdgpu_vram_reservation *rsv, *temp;
	struct drm_buddy_block *block;
	uint64_t vis_usage;

	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {
		if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
					   rsv->size, mm->chunk_size, &rsv->allocated,
					   DRM_BUDDY_RANGE_ALLOCATION))
			continue;

		block = amdgpu_vram_mgr_first_block(&rsv->allocated);
		if (!block)
			continue;

		dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
			rsv->start, rsv->size);

		vis_usage = amdgpu_vram_mgr_vis_size(adev, block);
		atomic64_add(vis_usage, &mgr->vis_usage);
		spin_lock(&man->bdev->lru_lock);
		man->usage += rsv->size;
		spin_unlock(&man->bdev->lru_lock);
		list_move(&rsv->blocks, &mgr->reserved_pages);
	}
}

/**
 * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
 *
 * @mgr: amdgpu_vram_mgr pointer
 * @start: start address of the range in VRAM
 * @size: size of the range
 *
 * Reserve memory from start address with the specified size in VRAM
 */
int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
				  uint64_t start, uint64_t size)
{
	struct amdgpu_vram_reservation *rsv;

	rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
	if (!rsv)
		return -ENOMEM;

	INIT_LIST_HEAD(&rsv->allocated);
	INIT_LIST_HEAD(&rsv->blocks);

	rsv->start = start;
	rsv->size = size;

	mutex_lock(&mgr->lock);
	list_add_tail(&rsv->blocks, &mgr->reservations_pending);
	amdgpu_vram_mgr_do_reserve(&mgr->manager);
	mutex_unlock(&mgr->lock);

	return 0;
}
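
/*
 * Usage sketch (illustrative, not a real call site): carving a firmware
 * region out of VRAM so the buddy allocator never hands it out. The names
 * "fw_region_start" and "fw_region_size" are placeholders; in the driver
 * this mechanism is used e.g. for the memory-training and stolen-VGA areas.
 *
 *	r = amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
 *					  fw_region_start, fw_region_size);
 *	if (r)
 *		return r;
 */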

/**
 * amdgpu_vram_mgr_query_page_status - query the reservation status
 *
 * @mgr: amdgpu_vram_mgr pointer
 * @start: start address of a page in VRAM
 *
 * Returns:
 *	-EBUSY: the page is still held in the pending list
 *	0: the page has been reserved
 *	-ENOENT: the input page is not a reservation
 */
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
				      uint64_t start)
{
	struct amdgpu_vram_reservation *rsv;
	int ret;

	mutex_lock(&mgr->lock);

	list_for_each_entry(rsv, &mgr->reservations_pending, blocks) {
		if (rsv->start <= start &&
		    (start < (rsv->start + rsv->size))) {
			ret = -EBUSY;
			goto out;
		}
	}

	list_for_each_entry(rsv, &mgr->reserved_pages, blocks) {
		if (rsv->start <= start &&
		    (start < (rsv->start + rsv->size))) {
			ret = 0;
			goto out;
		}
	}

	ret = -ENOENT;
out:
	mutex_unlock(&mgr->lock);
	return ret;
}
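
/*
 * Usage sketch (illustrative): a caller polling whether a reserved page has
 * been committed out of the pending list; "addr" is a placeholder address.
 *
 *	switch (amdgpu_vram_mgr_query_page_status(mgr, addr)) {
 *	case 0:		// reservation committed, page is fenced off
 *		break;
 *	case -EBUSY:	// still pending, retry after the next commit
 *		break;
 *	case -ENOENT:	// address was never reserved
 *		break;
 *	}
 */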

/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @res: the resulting mem object
 *
 * Allocate VRAM for the given BO.
 */
static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
			       struct ttm_buffer_object *tbo,
			       const struct ttm_place *place,
			       struct ttm_resource **res)
{
	u64 vis_usage = 0, max_bytes, cur_size, min_block_size;
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct amdgpu_vram_mgr_resource *vres;
	u64 size, remaining_size, lpfn, fpfn;
	struct drm_buddy *mm = &mgr->mm;
	struct drm_buddy_block *block;
	unsigned long pages_per_block;
	int r;

	/* place->lpfn/fpfn are 32-bit page numbers; widen before shifting */
	lpfn = (u64)place->lpfn << PAGE_SHIFT;
	if (!lpfn)
		lpfn = man->size;

	fpfn = (u64)place->fpfn << PAGE_SHIFT;

	max_bytes = adev->gmc.mc_vram_size;
	if (tbo->type != ttm_bo_type_kernel)
		max_bytes -= AMDGPU_VM_RESERVED_VRAM;

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		pages_per_block = ~0ul;
	} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pages_per_block = HPAGE_PMD_NR;
#else
		/* default to 2MB */
		pages_per_block = 2UL << (20UL - PAGE_SHIFT);
#endif
		pages_per_block = max_t(uint32_t, pages_per_block,
					tbo->page_alignment);
	}

	vres = kzalloc(sizeof(*vres), GFP_KERNEL);
	if (!vres)
		return -ENOMEM;

	ttm_resource_init(tbo, place, &vres->base);

	/* bail out quickly if there's likely not enough VRAM for this BO */
	if (ttm_resource_manager_usage(man) > max_bytes) {
		r = -ENOSPC;
		goto error_fini;
	}

	INIT_LIST_HEAD(&vres->blocks);

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;

	if (fpfn || lpfn != man->size)
		/* Allocate blocks in desired range */
		vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;

	remaining_size = vres->base.num_pages << PAGE_SHIFT;

	mutex_lock(&mgr->lock);
	while (remaining_size) {
		if (tbo->page_alignment)
			min_block_size = tbo->page_alignment << PAGE_SHIFT;
		else
			min_block_size = mgr->default_page_size;

		BUG_ON(min_block_size < mm->chunk_size);

		/* Limit maximum size to 2GiB due to SG table limitations */
		size = min(remaining_size, 2ULL << 30);

		if (size >= pages_per_block << PAGE_SHIFT)
			min_block_size = pages_per_block << PAGE_SHIFT;

		cur_size = size;

		if (fpfn + size != (u64)place->lpfn << PAGE_SHIFT) {
			/*
			 * Unless this is an exact-fit range allocation, adjust
			 * size and min_block_size according to the contiguous
			 * flag:
			 */
			if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
				size = roundup_pow_of_two(size);
				min_block_size = size;
			/*
			 * Round the size up if it is not aligned to
			 * min_block_size
			 */
			} else if (!IS_ALIGNED(size, min_block_size)) {
				size = round_up(size, min_block_size);
			}
		}

		r = drm_buddy_alloc_blocks(mm, fpfn,
					   lpfn,
					   size,
					   min_block_size,
					   &vres->blocks,
					   vres->flags);
		if (unlikely(r))
			goto error_free_blocks;

		if (size > remaining_size)
			remaining_size = 0;
		else
			remaining_size -= size;
	}
	mutex_unlock(&mgr->lock);

	if (cur_size != size) {
		struct drm_buddy_block *block;
		struct list_head *trim_list;
		u64 original_size;
		LIST_HEAD(temp);

		trim_list = &vres->blocks;
		original_size = vres->base.num_pages << PAGE_SHIFT;

		/*
		 * If size value is rounded up to min_block_size, trim the last
		 * block to the required size
		 */
		if (!list_is_singular(&vres->blocks)) {
			block = list_last_entry(&vres->blocks, typeof(*block), link);
			list_move_tail(&block->link, &temp);
			trim_list = &temp;
			/*
			 * Compute original_size by subtracting the padding
			 * (aligned size - original size) from the size of
			 * the last block
			 */
			original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size);
		}

		mutex_lock(&mgr->lock);
		drm_buddy_block_trim(mm,
				     original_size,
				     trim_list);
		mutex_unlock(&mgr->lock);

		if (!list_empty(&temp))
			list_splice_tail(trim_list, &vres->blocks);
	}

	list_for_each_entry(block, &vres->blocks, link)
		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);

	block = amdgpu_vram_mgr_first_block(&vres->blocks);
	if (!block) {
		r = -EINVAL;
		goto error_fini;
	}

	vres->base.start = amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;

	if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
		vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;

	if (adev->gmc.xgmi.connected_to_cpu)
		vres->base.bus.caching = ttm_cached;
	else
		vres->base.bus.caching = ttm_write_combined;

	atomic64_add(vis_usage, &mgr->vis_usage);
	*res = &vres->base;
	return 0;

error_free_blocks:
	drm_buddy_free_list(mm, &vres->blocks);
	mutex_unlock(&mgr->lock);
error_fini:
	ttm_resource_fini(man, &vres->base);
	kfree(vres);

	return r;
}
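
/*
 * Worked example of the sizing logic above (illustrative numbers): a
 * non-contiguous 5 MiB request with min_block_size = 2 MiB is rounded up to
 * size = 6 MiB before drm_buddy_alloc_blocks(), and the 1 MiB of padding
 * (size - cur_size) is trimmed off the last block afterwards. A contiguous
 * request is instead rounded up to the next power of two so that a single
 * buddy block can satisfy it, then trimmed back the same way.
 */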

/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @res: TTM memory object
 *
 * Free the allocated VRAM again.
 */
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
				struct ttm_resource *res)
{
	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_buddy *mm = &mgr->mm;
	struct drm_buddy_block *block;
	uint64_t vis_usage = 0;

	mutex_lock(&mgr->lock);
	list_for_each_entry(block, &vres->blocks, link)
		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);

	amdgpu_vram_mgr_do_reserve(man);

	drm_buddy_free_list(mm, &vres->blocks);
	mutex_unlock(&mgr->lock);

	atomic64_sub(vis_usage, &mgr->vis_usage);

	ttm_resource_fini(man, res);
	kfree(vres);
}

/**
 * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
 *
 * @adev: amdgpu device pointer
 * @res: TTM memory object
 * @offset: byte offset from the base of VRAM BO
 * @length: number of bytes to export in sg_table
 * @dev: the other device
 * @dir: dma direction
 * @sgt: resulting sg table
 *
 * Allocate and fill a sg table from a VRAM allocation.
 */
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
			      struct ttm_resource *res,
			      u64 offset, u64 length,
			      struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table **sgt)
{
	struct amdgpu_res_cursor cursor;
	struct scatterlist *sg;
	int num_entries = 0;
	int i, r;

	*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
	if (!*sgt)
		return -ENOMEM;

	/* Determine the number of DRM_BUDDY blocks to export */
	amdgpu_res_first(res, offset, length, &cursor);
	while (cursor.remaining) {
		num_entries++;
		amdgpu_res_next(&cursor, cursor.size);
	}

	r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
	if (r)
		goto error_free;

	/* Initialize scatterlist nodes of sg_table */
	for_each_sgtable_sg((*sgt), sg, i)
		sg->length = 0;

	/*
	 * Walk down DRM_BUDDY blocks to populate scatterlist nodes
	 * @note: Use the iterator API to get the first DRM_BUDDY block
	 * and the number of bytes from it. Access the following
	 * DRM_BUDDY block(s) if more of the buffer needs to be exported
	 */
	amdgpu_res_first(res, offset, length, &cursor);
	for_each_sgtable_sg((*sgt), sg, i) {
		phys_addr_t phys = cursor.start + adev->gmc.aper_base;
		size_t size = cursor.size;
		dma_addr_t addr;

		addr = dma_map_resource(dev, phys, size, dir,
					DMA_ATTR_SKIP_CPU_SYNC);
		r = dma_mapping_error(dev, addr);
		if (r)
			goto error_unmap;

		sg_set_page(sg, NULL, size, 0);
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = size;

		amdgpu_res_next(&cursor, cursor.size);
	}

	return 0;

error_unmap:
	for_each_sgtable_sg((*sgt), sg, i) {
		if (!sg->length)
			continue;

		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	}
	sg_free_table(*sgt);

error_free:
	kfree(*sgt);
	return r;
}
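
/*
 * Usage sketch (illustrative): exporting a whole VRAM BO to a peer device,
 * roughly as the dma-buf attachment path does. "attach" stands in for a
 * struct dma_buf_attachment and is an assumption, not code from this file:
 *
 *	struct sg_table *sgt;
 *
 *	r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
 *				      amdgpu_bo_size(bo), attach->dev,
 *				      DMA_BIDIRECTIONAL, &sgt);
 *	if (r)
 *		return ERR_PTR(r);
 *	...
 *	amdgpu_vram_mgr_free_sgt(attach->dev, DMA_BIDIRECTIONAL, sgt);
 */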

/**
 * amdgpu_vram_mgr_free_sgt - free an sg table
 *
 * @dev: device pointer
 * @dir: data direction of resource to unmap
 * @sgt: sg table to free
 *
 * Free a previously allocated sg table.
 */
void amdgpu_vram_mgr_free_sgt(struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sgtable_sg(sgt, sg, i)
		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}

/**
 * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
 *
 * @mgr: amdgpu_vram_mgr pointer
 *
 * Returns how many bytes are used in the visible part of VRAM
 */
uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
{
	return atomic64_read(&mgr->vis_usage);
}

/**
 * amdgpu_vram_mgr_debug - dump VRAM table
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the table content using the given DRM printer.
 */
static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
				  struct drm_printer *printer)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct drm_buddy *mm = &mgr->mm;
	struct drm_buddy_block *block;

	drm_printf(printer, "  vis usage:%llu\n",
		   amdgpu_vram_mgr_vis_usage(mgr));

	mutex_lock(&mgr->lock);
	drm_printf(printer, "default_page_size: %lluKiB\n",
		   mgr->default_page_size >> 10);

	drm_buddy_print(mm, printer);

	drm_printf(printer, "reserved:\n");
	list_for_each_entry(block, &mgr->reserved_pages, link)
		drm_buddy_block_print(mm, block, printer);
	mutex_unlock(&mgr->lock);
}

static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
	.alloc	= amdgpu_vram_mgr_new,
	.free	= amdgpu_vram_mgr_del,
	.debug	= amdgpu_vram_mgr_debug
};

/**
 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate and initialize the VRAM manager.
 */
int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
{
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;
	int err;

	ttm_resource_manager_init(man, &adev->mman.bdev,
				  adev->gmc.real_vram_size);

	man->func = &amdgpu_vram_mgr_func;

	err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
	if (err)
		return err;

	mutex_init(&mgr->lock);
	INIT_LIST_HEAD(&mgr->reservations_pending);
	INIT_LIST_HEAD(&mgr->reserved_pages);
	mgr->default_page_size = PAGE_SIZE;

	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
	ttm_resource_manager_set_used(man, true);
	return 0;
}
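
/*
 * Call-order note (illustrative): amdgpu_vram_mgr_init() is expected to run
 * once during TTM setup, after which the manager serves allocations through
 * amdgpu_vram_mgr_func, and amdgpu_vram_mgr_fini() below undoes it at
 * teardown:
 *
 *	r = amdgpu_vram_mgr_init(adev);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_vram_mgr_fini(adev);
 */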

/**
 * amdgpu_vram_mgr_fini - free and destroy VRAM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Destroy and free the VRAM manager. Teardown is aborted if evicting the
 * remaining buffers fails.
 */
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
{
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;
	int ret;
	struct amdgpu_vram_reservation *rsv, *temp;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
	if (ret)
		return;

	mutex_lock(&mgr->lock);
	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks)
		kfree(rsv);

	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
		drm_buddy_free_list(&mgr->mm, &rsv->blocks);
		kfree(rsv);
	}
	drm_buddy_fini(&mgr->mm);
	mutex_unlock(&mgr->lock);

	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
}