/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/dma-mapping.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"

static int amdgpu_vram_mgr_free_backup_pages(struct amdgpu_vram_mgr *mgr,
					     uint32_t num_pages);

static inline struct amdgpu_vram_mgr *to_vram_mgr(struct ttm_resource_manager *man)
{
	return container_of(man, struct amdgpu_vram_mgr, manager);
}

static inline struct amdgpu_device *to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
{
	return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
}

/**
 * DOC: mem_info_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * VRAM available on the device.
 * The file mem_info_vram_total is used for this and returns the total
 * amount of VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
}

/**
 * DOC: mem_info_vis_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * CPU-visible VRAM available on the device.
 * The file mem_info_vis_vram_total is used for this and returns the total
 * amount of visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
}

/**
 * DOC: mem_info_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of VRAM
 * currently used on the device.
 * The file mem_info_vram_used is used for this and returns the total
 * amount of currently used VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

	return snprintf(buf, PAGE_SIZE, "%llu\n",
			amdgpu_vram_mgr_usage(man));
}

/**
 * DOC: mem_info_vis_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of
 * CPU-visible VRAM currently used on the device.
 * The file mem_info_vis_vram_used is used for this and returns the total
 * amount of currently used visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

	return snprintf(buf, PAGE_SIZE, "%llu\n",
			amdgpu_vram_mgr_vis_usage(man));
}

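/**
 * DOC: mem_info_vram_vendor
 *
 * The amdgpu driver provides a sysfs API for reporting the vendor of the
 * VRAM on the device.
 * The file mem_info_vram_vendor is used for this and returns the vendor
 * name as a string, or "unknown" if the vendor cannot be determined.
 */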
static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	switch (adev->gmc.vram_vendor) {
	case SAMSUNG:
		return snprintf(buf, PAGE_SIZE, "samsung\n");
	case INFINEON:
		return snprintf(buf, PAGE_SIZE, "infineon\n");
	case ELPIDA:
		return snprintf(buf, PAGE_SIZE, "elpida\n");
	case ETRON:
		return snprintf(buf, PAGE_SIZE, "etron\n");
	case NANYA:
		return snprintf(buf, PAGE_SIZE, "nanya\n");
	case HYNIX:
		return snprintf(buf, PAGE_SIZE, "hynix\n");
	case MOSEL:
		return snprintf(buf, PAGE_SIZE, "mosel\n");
	case WINBOND:
		return snprintf(buf, PAGE_SIZE, "winbond\n");
	case ESMT:
		return snprintf(buf, PAGE_SIZE, "esmt\n");
	case MICRON:
		return snprintf(buf, PAGE_SIZE, "micron\n");
	default:
		return snprintf(buf, PAGE_SIZE, "unknown\n");
	}
}

static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
		   amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
		   amdgpu_mem_info_vis_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
		   amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
		   amdgpu_mem_info_vis_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
		   amdgpu_mem_info_vram_vendor, NULL);

static const struct attribute *amdgpu_vram_mgr_attributes[] = {
	&dev_attr_mem_info_vram_total.attr,
	&dev_attr_mem_info_vis_vram_total.attr,
	&dev_attr_mem_info_vram_used.attr,
	&dev_attr_mem_info_vis_vram_used.attr,
	&dev_attr_mem_info_vram_vendor.attr,
	NULL
};
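
/*
 * Illustrative userspace usage (not part of this driver): the attributes
 * above are created in the device's sysfs directory, which is typically
 * reached through the DRM card link. The card index is system dependent
 * and assumed here:
 *
 *	$ cat /sys/class/drm/card0/device/mem_info_vram_total
 *	$ cat /sys/class/drm/card0/device/mem_info_vram_vendor
 */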

static const struct ttm_resource_manager_func amdgpu_vram_mgr_func;

/**
 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate and initialize the VRAM manager.
 */
int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
{
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;
	int ret;

	ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT);

	man->func = &amdgpu_vram_mgr_func;

	drm_mm_init(&mgr->mm, 0, man->size);
	spin_lock_init(&mgr->lock);
	INIT_LIST_HEAD(&mgr->reservations_pending);
	INIT_LIST_HEAD(&mgr->reserved_pages);
	INIT_LIST_HEAD(&mgr->backup_pages);

	/* Add the VRAM-related sysfs files */
	ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
	if (ret)
		DRM_ERROR("Failed to register sysfs\n");

	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
	ttm_resource_manager_set_used(man, true);
	return 0;
}

/**
 * amdgpu_vram_mgr_fini - free and destroy VRAM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Destroy and free the VRAM manager. Bails out early, leaving the manager
 * in place, if the remaining allocations cannot be evicted.
 */
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
{
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;
	int ret;
	struct amdgpu_vram_reservation *rsv, *temp;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
	if (ret)
		return;

	spin_lock(&mgr->lock);
	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node)
		kfree(rsv);

	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) {
		drm_mm_remove_node(&rsv->mm_node);
		kfree(rsv);
	}

	list_for_each_entry_safe(rsv, temp, &mgr->backup_pages, node) {
		drm_mm_remove_node(&rsv->mm_node);
		kfree(rsv);
	}
	drm_mm_takedown(&mgr->mm);
	spin_unlock(&mgr->lock);

	sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);

	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
}

/**
 * amdgpu_vram_mgr_vis_size - Calculate visible node size
 *
 * @adev: amdgpu_device pointer
 * @node: MM node structure
 *
 * Calculate how many bytes of the MM node are inside visible VRAM
 */
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
				    struct drm_mm_node *node)
{
	uint64_t start = node->start << PAGE_SHIFT;
	uint64_t end = (node->size + node->start) << PAGE_SHIFT;

	if (start >= adev->gmc.visible_vram_size)
		return 0;

	return (end > adev->gmc.visible_vram_size ?
		adev->gmc.visible_vram_size : end) - start;
}
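
/*
 * Worked example with illustrative numbers: with 256 MiB of visible VRAM
 * (0x10000000 bytes), a node covering bytes 0x0ff00000..0x10100000 starts
 * below the visible limit and ends above it, so the function returns
 * 0x10000000 - 0x0ff00000 = 1 MiB, the visible slice of the node.
 */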

/**
 * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
 *
 * @bo: &amdgpu_bo buffer object (must be in VRAM)
 *
 * Returns:
 * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
 */
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_resource *mem = &bo->tbo.mem;
	struct drm_mm_node *nodes = mem->mm_node;
	unsigned pages = mem->num_pages;
	u64 usage;

	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
		return amdgpu_bo_size(bo);

	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
		return 0;

	for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
		usage += amdgpu_vram_mgr_vis_size(adev, nodes);

	return usage;
}

static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_mm *mm = &mgr->mm;
	struct amdgpu_vram_reservation *rsv, *temp;
	uint64_t vis_usage;

	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) {
		if (drm_mm_reserve_node(mm, &rsv->mm_node))
			continue;

		dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
			rsv->mm_node.start << PAGE_SHIFT, rsv->mm_node.size);

		vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
		atomic64_add(vis_usage, &mgr->vis_usage);
		atomic64_add(rsv->mm_node.size << PAGE_SHIFT, &mgr->usage);
		list_move(&rsv->node, &mgr->reserved_pages);

		amdgpu_vram_mgr_free_backup_pages(mgr, rsv->mm_node.size);
	}
}

/**
 * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
 *
 * @man: TTM memory type manager
 * @start: start address of the range in VRAM
 * @size: size of the range
 *
 * Reserve memory from start address with the specified size in VRAM
 */
int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
				  uint64_t start, uint64_t size)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct amdgpu_vram_reservation *rsv;

	rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
	if (!rsv)
		return -ENOMEM;

	INIT_LIST_HEAD(&rsv->node);
	rsv->mm_node.start = start >> PAGE_SHIFT;
	rsv->mm_node.size = size >> PAGE_SHIFT;

	dev_dbg(adev->dev, "Pending Reservation: 0x%llx\n", start);

	spin_lock(&mgr->lock);
	list_add_tail(&rsv->node, &mgr->reservations_pending);
	amdgpu_vram_mgr_do_reserve(man);
	spin_unlock(&mgr->lock);

	return 0;
}
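
/*
 * Hypothetical caller sketch: retiring one bad page at byte offset
 * bad_addr so the allocator never hands it out again; bad_addr and the
 * surrounding code are assumptions, not part of this file:
 *
 *	struct ttm_resource_manager *man =
 *		ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 *
 *	amdgpu_vram_mgr_reserve_range(man, bad_addr, PAGE_SIZE);
 */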
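
/**
 * amdgpu_vram_mgr_free_backup_pages - release reserved backup pages
 *
 * @mgr: amdgpu VRAM manager
 * @num_pages: number of single-page reservations to release
 *
 * Remove @num_pages entries from the backup list, return the pages to the
 * allocator and adjust the usage counters. Must be called with @mgr->lock
 * held.
 */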
static int amdgpu_vram_mgr_free_backup_pages(struct amdgpu_vram_mgr *mgr,
					     uint32_t num_pages)
{
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct amdgpu_vram_reservation *rsv;
	uint32_t i;
	uint64_t vis_usage = 0, total_usage = 0;

	if (num_pages > mgr->num_backup_pages) {
		dev_warn(adev->dev, "Not enough backup pages\n");
		return -EINVAL;
	}

	for (i = 0; i < num_pages; i++) {
		rsv = list_first_entry(&mgr->backup_pages,
				       struct amdgpu_vram_reservation, node);
		vis_usage += amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
		total_usage += (rsv->mm_node.size << PAGE_SHIFT);
		drm_mm_remove_node(&rsv->mm_node);
		list_del(&rsv->node);
		kfree(rsv);
		mgr->num_backup_pages--;
	}

	atomic64_sub(total_usage, &mgr->usage);
	atomic64_sub(vis_usage, &mgr->vis_usage);

	return 0;
}
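
/**
 * amdgpu_vram_mgr_reserve_backup_pages - set aside single-page backup nodes
 *
 * @man: TTM memory type manager
 * @num_pages: number of single pages to reserve
 *
 * Carve @num_pages individual pages out of VRAM and park them on the
 * backup list; amdgpu_vram_mgr_do_reserve() releases one backup page for
 * each page of a pending reservation that succeeds. On failure all backup
 * pages reserved so far are released again.
 */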
int amdgpu_vram_mgr_reserve_backup_pages(struct ttm_resource_manager *man,
					 uint32_t num_pages)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct amdgpu_vram_reservation *rsv;
	struct drm_mm *mm = &mgr->mm;
	uint32_t i;
	int ret = 0;
	uint64_t vis_usage, total_usage;

	for (i = 0; i < num_pages; i++) {
		rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
		if (!rsv) {
			ret = -ENOMEM;
			goto pro_end;
		}

		INIT_LIST_HEAD(&rsv->node);

		/* The drm_mm is protected by mgr->lock everywhere else in
		 * this file, so hold it across the node insertion as well.
		 */
		spin_lock(&mgr->lock);
		ret = drm_mm_insert_node(mm, &rsv->mm_node, 1);
		if (ret) {
			spin_unlock(&mgr->lock);
			dev_err(adev->dev, "failed to reserve backup page %d, ret 0x%x\n", i, ret);
			kfree(rsv);
			goto pro_end;
		}

		vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
		total_usage = (rsv->mm_node.size << PAGE_SHIFT);

		atomic64_add(vis_usage, &mgr->vis_usage);
		atomic64_add(total_usage, &mgr->usage);
		list_add_tail(&rsv->node, &mgr->backup_pages);
		mgr->num_backup_pages++;
		spin_unlock(&mgr->lock);
	}

pro_end:
	if (ret) {
		spin_lock(&mgr->lock);
		amdgpu_vram_mgr_free_backup_pages(mgr, mgr->num_backup_pages);
		spin_unlock(&mgr->lock);
	}

	return ret;
}

/**
 * amdgpu_vram_mgr_query_page_status - query the reservation status
 *
 * @man: TTM memory type manager
 * @start: start address of a page in VRAM
 *
 * Returns:
 *	-EBUSY: the page is still held in the pending list
 *	0: the page has been reserved
 *	-ENOENT: the input page is not a reservation
 */
int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man,
				      uint64_t start)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_vram_reservation *rsv;
	int ret;

	spin_lock(&mgr->lock);

	list_for_each_entry(rsv, &mgr->reservations_pending, node) {
		if ((rsv->mm_node.start <= start) &&
		    (start < (rsv->mm_node.start + rsv->mm_node.size))) {
			ret = -EBUSY;
			goto out;
		}
	}

	list_for_each_entry(rsv, &mgr->reserved_pages, node) {
		if ((rsv->mm_node.start <= start) &&
		    (start < (rsv->mm_node.start + rsv->mm_node.size))) {
			ret = 0;
			goto out;
		}
	}

	ret = -ENOENT;
out:
	spin_unlock(&mgr->lock);
	return ret;
}
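
/*
 * Sketch of the expected calling pattern (caller and variables assumed,
 * not part of this file):
 *
 *	switch (amdgpu_vram_mgr_query_page_status(man, page)) {
 *	case 0:       // page has been reserved
 *	case -EBUSY:  // reservation still pending, check again later
 *	case -ENOENT: // page was never passed to reserve_range
 *	}
 */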

/**
 * amdgpu_vram_mgr_virt_start - update virtual start address
 *
 * @mem: ttm_resource to update
 * @node: just allocated node
 *
 * Calculate a virtual BO start address to easily check if everything is CPU
 * accessible.
 */
static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
				       struct drm_mm_node *node)
{
	unsigned long start;

	start = node->start + node->size;
	if (start > mem->num_pages)
		start -= mem->num_pages;
	else
		start = 0;
	mem->start = max(mem->start, start);
}
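
/*
 * Worked example: a 16 page BO whose highest node ends at page 100 gets
 * mem->start >= 100 - 16 = 84, so a caller can check
 * "mem->start + mem->num_pages <= visible_pfn" and know that every node
 * of the BO ends below the visible limit, without walking the node list.
 */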

/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate VRAM for the given BO.
 */
static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
			       struct ttm_buffer_object *tbo,
			       const struct ttm_place *place,
			       struct ttm_resource *mem)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_mm *mm = &mgr->mm;
	struct drm_mm_node *nodes;
	enum drm_mm_insert_mode mode;
	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
	uint64_t vis_usage = 0, mem_bytes, max_bytes;
	unsigned i;
	int r;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	max_bytes = adev->gmc.mc_vram_size;
	if (tbo->type != ttm_bo_type_kernel)
		max_bytes -= AMDGPU_VM_RESERVED_VRAM;

	/* bail out quickly if there's likely not enough VRAM for this BO */
	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
		atomic64_sub(mem_bytes, &mgr->usage);
		return -ENOSPC;
	}

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		pages_per_node = ~0ul;
		num_nodes = 1;
	} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pages_per_node = HPAGE_PMD_NR;
#else
		/* default to 2MB */
		pages_per_node = (2UL << (20UL - PAGE_SHIFT));
#endif
		pages_per_node = max((uint32_t)pages_per_node, mem->page_alignment);
		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
	}

	nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
			       GFP_KERNEL | __GFP_ZERO);
	if (!nodes) {
		atomic64_sub(mem_bytes, &mgr->usage);
		return -ENOMEM;
	}

	mode = DRM_MM_INSERT_BEST;
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	mem->start = 0;
	pages_left = mem->num_pages;

	spin_lock(&mgr->lock);
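	/* First pass: cover the bulk of the BO with large power-of-two
	 * sized blocks, aligned to pages_per_node.
	 */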
	for (i = 0; pages_left >= pages_per_node; ++i) {
		unsigned long pages = rounddown_pow_of_two(pages_left);

		/* Limit maximum size to 2GB due to SG table limitations */
		pages = min(pages, (2UL << (30 - PAGE_SHIFT)));

		r = drm_mm_insert_node_in_range(mm, &nodes[i], pages,
						pages_per_node, 0,
						place->fpfn, lpfn,
						mode);
		if (unlikely(r))
			break;

		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
		pages_left -= pages;
	}

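	/* Second pass: allocate the remainder in pages_per_node sized
	 * chunks, with one final node for anything left over.
	 */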
	for (; pages_left; ++i) {
		unsigned long pages = min(pages_left, pages_per_node);
		uint32_t alignment = mem->page_alignment;

		if (pages == pages_per_node)
			alignment = pages_per_node;

		r = drm_mm_insert_node_in_range(mm, &nodes[i],
						pages, alignment, 0,
						place->fpfn, lpfn,
						mode);
		if (unlikely(r))
			goto error;

		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
		pages_left -= pages;
	}
	spin_unlock(&mgr->lock);

	atomic64_add(vis_usage, &mgr->vis_usage);

	mem->mm_node = nodes;

	return 0;

error:
	while (i--)
		drm_mm_remove_node(&nodes[i]);
	spin_unlock(&mgr->lock);
	atomic64_sub((u64)mem->num_pages << PAGE_SHIFT, &mgr->usage);

	kvfree(nodes);
	return r;
}

/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @mem: TTM memory object
 *
 * Free the allocated VRAM again.
 */
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
				struct ttm_resource *mem)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_mm_node *nodes = mem->mm_node;
	uint64_t usage = 0, vis_usage = 0;
	unsigned pages = mem->num_pages;

	if (!mem->mm_node)
		return;

	spin_lock(&mgr->lock);
	while (pages) {
		pages -= nodes->size;
		drm_mm_remove_node(nodes);
		usage += nodes->size << PAGE_SHIFT;
		vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
		++nodes;
	}
	amdgpu_vram_mgr_do_reserve(man);
	spin_unlock(&mgr->lock);

	atomic64_sub(usage, &mgr->usage);
	atomic64_sub(vis_usage, &mgr->vis_usage);

	kvfree(mem->mm_node);
	mem->mm_node = NULL;
}

/**
 * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
 *
 * @adev: amdgpu device pointer
 * @mem: TTM memory object
 * @dev: the device the sg table is mapped for
 * @dir: dma direction
 * @sgt: resulting sg table
 *
 * Allocate and fill a sg table from a VRAM allocation.
 */
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
			      struct ttm_resource *mem,
			      struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table **sgt)
{
	struct drm_mm_node *node;
	struct scatterlist *sg;
	int num_entries = 0;
	unsigned int pages;
	int i, r;

	*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
	if (!*sgt)
		return -ENOMEM;

	for (pages = mem->num_pages, node = mem->mm_node;
	     pages; pages -= node->size, ++node)
		++num_entries;

	r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
	if (r)
		goto error_free;

	for_each_sgtable_sg((*sgt), sg, i)
		sg->length = 0;

	node = mem->mm_node;
	for_each_sgtable_sg((*sgt), sg, i) {
		phys_addr_t phys = (node->start << PAGE_SHIFT) +
			adev->gmc.aper_base;
		size_t size = node->size << PAGE_SHIFT;
		dma_addr_t addr;

		++node;
		addr = dma_map_resource(dev, phys, size, dir,
					DMA_ATTR_SKIP_CPU_SYNC);
		r = dma_mapping_error(dev, addr);
		if (r)
			goto error_unmap;

		sg_set_page(sg, NULL, size, 0);
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = size;
	}
	return 0;

error_unmap:
	for_each_sgtable_sg((*sgt), sg, i) {
		if (!sg->length)
			continue;

		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	}
	sg_free_table(*sgt);

error_free:
	kfree(*sgt);
	return r;
}
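
/*
 * Note: the entries built above carry only DMA addresses (sg_page() is
 * NULL), so the table is meant for device access such as dma-buf peer to
 * peer and must not be used with helpers that expect struct pages.
 */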

/**
 * amdgpu_vram_mgr_free_sgt - free a previously allocated sg table
 *
 * @dev: device pointer
 * @dir: data direction of resource to unmap
 * @sgt: sg table to free
 *
 * Unmap and free a sg table previously allocated by
 * amdgpu_vram_mgr_alloc_sgt().
 */
void amdgpu_vram_mgr_free_sgt(struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sgtable_sg(sgt, sg, i)
		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}

/**
 * amdgpu_vram_mgr_usage - how many bytes are used in this domain
 *
 * @man: TTM memory type manager
 *
 * Returns how many bytes are used in this domain.
 */
uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

	return atomic64_read(&mgr->usage);
}

/**
 * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
 *
 * @man: TTM memory type manager
 *
 * Returns how many bytes are used in the visible part of VRAM
 */
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

	return atomic64_read(&mgr->vis_usage);
}

/**
 * amdgpu_vram_mgr_debug - dump VRAM table
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the table content to the specified DRM printer.
 */
static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
				  struct drm_printer *printer)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

	spin_lock(&mgr->lock);
	drm_mm_print(&mgr->mm, printer);
	spin_unlock(&mgr->lock);

	drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
		   man->size, amdgpu_vram_mgr_usage(man) >> 20,
		   amdgpu_vram_mgr_vis_usage(man) >> 20);
}

static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
	.alloc	= amdgpu_vram_mgr_new,
	.free	= amdgpu_vram_mgr_del,
	.debug	= amdgpu_vram_mgr_debug
};