/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/dma-mapping.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"

static inline struct amdgpu_vram_mgr *
to_vram_mgr(struct ttm_resource_manager *man)
{
	return container_of(man, struct amdgpu_vram_mgr, manager);
}

static inline struct amdgpu_device *
to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
{
	return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
}

/**
 * DOC: mem_info_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * VRAM available on the device.
 * The file mem_info_vram_total is used for this and returns the total
 * amount of VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size);
}

/**
 * DOC: mem_info_vis_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * CPU visible VRAM available on the device.
 * The file mem_info_vis_vram_total is used for this and returns the total
 * amount of visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
						   struct device_attribute *attr,
						   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size);
}

/**
 * DOC: mem_info_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of VRAM
 * currently used on the device.
 * The file mem_info_vram_used is used for this and returns the total
 * amount of currently used VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_usage(man));
}

/**
 * DOC: mem_info_vis_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of CPU
 * visible VRAM currently used.
 * The file mem_info_vis_vram_used is used for this and returns the total
 * amount of currently used visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
						  struct device_attribute *attr,
						  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_vis_usage(man));
}

/**
 * DOC: mem_info_vram_vendor
 *
 * The amdgpu driver provides a sysfs API for reporting the vendor of the
 * installed VRAM.
 * The file mem_info_vram_vendor is used for this and returns the name of
 * the vendor.
 */
static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	switch (adev->gmc.vram_vendor) {
	case SAMSUNG:
		return sysfs_emit(buf, "samsung\n");
	case INFINEON:
		return sysfs_emit(buf, "infineon\n");
	case ELPIDA:
		return sysfs_emit(buf, "elpida\n");
	case ETRON:
		return sysfs_emit(buf, "etron\n");
	case NANYA:
		return sysfs_emit(buf, "nanya\n");
	case HYNIX:
		return sysfs_emit(buf, "hynix\n");
	case MOSEL:
		return sysfs_emit(buf, "mosel\n");
	case WINBOND:
		return sysfs_emit(buf, "winbond\n");
	case ESMT:
		return sysfs_emit(buf, "esmt\n");
	case MICRON:
		return sysfs_emit(buf, "micron\n");
	default:
		return sysfs_emit(buf, "unknown\n");
	}
}

static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
		   amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
		   amdgpu_mem_info_vis_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
		   amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
		   amdgpu_mem_info_vis_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
		   amdgpu_mem_info_vram_vendor, NULL);

static const struct attribute *amdgpu_vram_mgr_attributes[] = {
	&dev_attr_mem_info_vram_total.attr,
	&dev_attr_mem_info_vis_vram_total.attr,
	&dev_attr_mem_info_vram_used.attr,
	&dev_attr_mem_info_vis_vram_used.attr,
	&dev_attr_mem_info_vram_vendor.attr,
	NULL
};
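
/*
 * Example of reading the attributes above from userspace. The card index
 * and the values shown are illustrative and vary per system:
 *
 *	$ cd /sys/class/drm/card0/device
 *	$ cat mem_info_vram_total mem_info_vram_used
 *	8573157376
 *	434176
 *
 * All values are reported in bytes.
 */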

/**
 * amdgpu_vram_mgr_vis_size - Calculate visible node size
 *
 * @adev: amdgpu_device pointer
 * @node: MM node structure
 *
 * Calculate how many bytes of the MM node are inside visible VRAM
 */
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
				    struct drm_mm_node *node)
{
	uint64_t start = node->start << PAGE_SHIFT;
	uint64_t end = (node->size + node->start) << PAGE_SHIFT;

	if (start >= adev->gmc.visible_vram_size)
		return 0;

	return (end > adev->gmc.visible_vram_size ?
		adev->gmc.visible_vram_size : end) - start;
}
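
/*
 * Worked example for amdgpu_vram_mgr_vis_size() with illustrative numbers:
 * given a 256MB CPU visible window, a node spanning [192MB, 320MB) overlaps
 * it by min(end, 256MB) - start = 64MB, while a node starting at or beyond
 * 256MB contributes nothing.
 */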

/**
 * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
 *
 * @bo: &amdgpu_bo buffer object (must be in VRAM)
 *
 * Returns:
 * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
 */
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_resource *mem = &bo->tbo.mem;
	struct drm_mm_node *nodes = mem->mm_node;
	unsigned pages = mem->num_pages;
	u64 usage;

	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
		return amdgpu_bo_size(bo);

	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
		return 0;

	for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
		usage += amdgpu_vram_mgr_vis_size(adev, nodes);

	return usage;
}

/* Commit the reservation of VRAM pages */
static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_mm *mm = &mgr->mm;
	struct amdgpu_vram_reservation *rsv, *temp;
	uint64_t vis_usage;

	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) {
		if (drm_mm_reserve_node(mm, &rsv->mm_node))
			continue;

		dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
			rsv->mm_node.start, rsv->mm_node.size);

		vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
		atomic64_add(vis_usage, &mgr->vis_usage);
		atomic64_add(rsv->mm_node.size << PAGE_SHIFT, &mgr->usage);
		list_move(&rsv->node, &mgr->reserved_pages);
	}
}

/**
 * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
 *
 * @man: TTM memory type manager
 * @start: start address of the range in VRAM
 * @size: size of the range
 *
 * Reserve memory from the start address with the specified size in VRAM
 */
int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
				  uint64_t start, uint64_t size)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_vram_reservation *rsv;

	rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
	if (!rsv)
		return -ENOMEM;

	INIT_LIST_HEAD(&rsv->node);
	rsv->mm_node.start = start >> PAGE_SHIFT;
	rsv->mm_node.size = size >> PAGE_SHIFT;

	spin_lock(&mgr->lock);
	list_add_tail(&rsv->node, &mgr->reservations_pending);
	amdgpu_vram_mgr_do_reserve(man);
	spin_unlock(&mgr->lock);

	return 0;
}
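
/*
 * A minimal usage sketch for amdgpu_vram_mgr_reserve_range(); the offset
 * and size below are illustrative. This is how, for example, a region
 * containing retired (bad) pages can be kept away from the allocator:
 *
 *	struct ttm_resource_manager *man =
 *		ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 *
 *	r = amdgpu_vram_mgr_reserve_range(man, 0x100000, 0x100000);
 *	if (r)
 *		return r;
 */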

/**
 * amdgpu_vram_mgr_query_page_status - query the reservation status
 *
 * @man: TTM memory type manager
 * @start: start address of a page in VRAM
 *
 * Returns:
 * -EBUSY: the page is still held in the pending list
 * 0: the page has been reserved
 * -ENOENT: the input page is not a reservation
 */
int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man,
				      uint64_t start)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_vram_reservation *rsv;
	int ret;

	spin_lock(&mgr->lock);

	list_for_each_entry(rsv, &mgr->reservations_pending, node) {
		if ((rsv->mm_node.start <= start) &&
		    (start < (rsv->mm_node.start + rsv->mm_node.size))) {
			ret = -EBUSY;
			goto out;
		}
	}

	list_for_each_entry(rsv, &mgr->reserved_pages, node) {
		if ((rsv->mm_node.start <= start) &&
		    (start < (rsv->mm_node.start + rsv->mm_node.size))) {
			ret = 0;
			goto out;
		}
	}

	ret = -ENOENT;
out:
	spin_unlock(&mgr->lock);
	return ret;
}

/**
 * amdgpu_vram_mgr_virt_start - update virtual start address
 *
 * @mem: ttm_resource to update
 * @node: just allocated node
 *
 * Calculate a virtual BO start address to easily check if everything is CPU
 * accessible.
 */
static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
				       struct drm_mm_node *node)
{
	unsigned long start;

	start = node->start + node->size;
	if (start > mem->num_pages)
		start -= mem->num_pages;
	else
		start = 0;
	mem->start = max(mem->start, start);
}
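
/*
 * Worked example for amdgpu_vram_mgr_virt_start(): for a BO with
 * num_pages = 4 whose node ends at page 12, mem->start is raised to at
 * least 12 - 4 = 8. The virtual start is thus chosen so that
 * mem->start + num_pages never falls below the end of any node, which
 * makes "is the whole BO CPU accessible?" a single range check against
 * the visible VRAM size.
 */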

/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate VRAM for the given BO.
 */
static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
			       struct ttm_buffer_object *tbo,
			       const struct ttm_place *place,
			       struct ttm_resource *mem)
{
	unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	uint64_t vis_usage = 0, mem_bytes, max_bytes;
	struct drm_mm *mm = &mgr->mm;
	enum drm_mm_insert_mode mode;
	struct drm_mm_node *nodes;
	unsigned i;
	int r;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	max_bytes = adev->gmc.mc_vram_size;
	if (tbo->type != ttm_bo_type_kernel)
		max_bytes -= AMDGPU_VM_RESERVED_VRAM;

	/* bail out quickly if there's likely not enough VRAM for this BO */
	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
		atomic64_sub(mem_bytes, &mgr->usage);
		return -ENOSPC;
	}

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		pages_per_node = ~0ul;
		num_nodes = 1;
	} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pages_per_node = HPAGE_PMD_NR;
#else
		/* default to 2MB */
		pages_per_node = 2UL << (20UL - PAGE_SHIFT);
#endif
		pages_per_node = max_t(uint32_t, pages_per_node,
				       mem->page_alignment);
		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
	}

	nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
			       GFP_KERNEL | __GFP_ZERO);
	if (!nodes) {
		atomic64_sub(mem_bytes, &mgr->usage);
		return -ENOMEM;
	}

	mode = DRM_MM_INSERT_BEST;
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	mem->start = 0;
	pages_left = mem->num_pages;

	/* Limit maximum size to 2GB due to SG table limitations */
	pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));

	i = 0;
	spin_lock(&mgr->lock);
	while (pages_left) {
		uint32_t alignment = mem->page_alignment;

		if (pages >= pages_per_node)
			alignment = pages_per_node;

		r = drm_mm_insert_node_in_range(mm, &nodes[i], pages, alignment,
						0, place->fpfn, lpfn, mode);
		if (unlikely(r)) {
			if (pages > pages_per_node) {
				if (is_power_of_2(pages))
					pages = pages / 2;
				else
					pages = rounddown_pow_of_two(pages);
				continue;
			}
			goto error;
		}

		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
		pages_left -= pages;
		++i;

		if (pages > pages_left)
			pages = pages_left;
	}
	spin_unlock(&mgr->lock);

	atomic64_add(vis_usage, &mgr->vis_usage);

	mem->mm_node = nodes;

	return 0;

error:
	while (i--)
		drm_mm_remove_node(&nodes[i]);
	spin_unlock(&mgr->lock);
	atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);

	kvfree(nodes);
	return r;
}
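
/*
 * Note on the insert loop in amdgpu_vram_mgr_new(): allocations are tried
 * in chunks of up to 2GB (the SG table limit). Whenever an insert fails and
 * the chunk is still larger than pages_per_node, the chunk size is cut down
 * to the next lower power of two and the insert is retried, so fragmented
 * VRAM is filled with progressively smaller nodes instead of failing the
 * whole allocation outright.
 */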

/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @mem: TTM memory object
 *
 * Free the allocated VRAM again.
 */
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
				struct ttm_resource *mem)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_mm_node *nodes = mem->mm_node;
	uint64_t usage = 0, vis_usage = 0;
	unsigned pages = mem->num_pages;

	if (!mem->mm_node)
		return;

	spin_lock(&mgr->lock);
	while (pages) {
		pages -= nodes->size;
		drm_mm_remove_node(nodes);
		usage += nodes->size << PAGE_SHIFT;
		vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
		++nodes;
	}
	amdgpu_vram_mgr_do_reserve(man);
	spin_unlock(&mgr->lock);

	atomic64_sub(usage, &mgr->usage);
	atomic64_sub(vis_usage, &mgr->vis_usage);

	kvfree(mem->mm_node);
	mem->mm_node = NULL;
}

/**
 * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
 *
 * @adev: amdgpu device pointer
 * @mem: TTM memory object
 * @offset: byte offset from the base of VRAM BO
 * @length: number of bytes to export in sg_table
 * @dev: the other device
 * @dir: dma direction
 * @sgt: resulting sg table
 *
 * Allocate and fill a sg table from a VRAM allocation.
 */
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
			      struct ttm_resource *mem,
			      u64 offset, u64 length,
			      struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table **sgt)
{
	struct amdgpu_res_cursor cursor;
	struct scatterlist *sg;
	int num_entries = 0;
	int i, r;

	*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
	if (!*sgt)
		return -ENOMEM;

	/* Determine the number of DRM_MM nodes to export */
	amdgpu_res_first(mem, offset, length, &cursor);
	while (cursor.remaining) {
		num_entries++;
		amdgpu_res_next(&cursor, cursor.size);
	}

	r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
	if (r)
		goto error_free;

	/* Initialize scatterlist nodes of sg_table */
	for_each_sgtable_sg((*sgt), sg, i)
		sg->length = 0;

	/*
	 * Walk down DRM_MM nodes to populate scatterlist nodes
	 * @note: Use the iterator API to get the first DRM_MM node
	 * and the number of bytes from it. Access the following
	 * DRM_MM node(s) if more buffer needs to be exported.
	 */
	amdgpu_res_first(mem, offset, length, &cursor);
	for_each_sgtable_sg((*sgt), sg, i) {
		phys_addr_t phys = cursor.start + adev->gmc.aper_base;
		size_t size = cursor.size;
		dma_addr_t addr;

		addr = dma_map_resource(dev, phys, size, dir,
					DMA_ATTR_SKIP_CPU_SYNC);
		r = dma_mapping_error(dev, addr);
		if (r)
			goto error_unmap;

		sg_set_page(sg, NULL, size, 0);
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = size;

		amdgpu_res_next(&cursor, cursor.size);
	}

	return 0;

error_unmap:
	for_each_sgtable_sg((*sgt), sg, i) {
		if (!sg->length)
			continue;

		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	}
	sg_free_table(*sgt);

error_free:
	kfree(*sgt);
	return r;
}
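
/*
 * A sketch of the expected calling pattern (peer device and offsets are
 * illustrative): the table returned here describes VRAM through the PCIe
 * aperture, so it is suitable for peer-to-peer DMA and must be released
 * with amdgpu_vram_mgr_free_sgt() below.
 *
 *	struct sg_table *sgt;
 *	int r;
 *
 *	r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0,
 *				      amdgpu_bo_size(bo), dev, dir, &sgt);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_vram_mgr_free_sgt(dev, dir, sgt);
 */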

/**
 * amdgpu_vram_mgr_free_sgt - free the sg table
 *
 * @dev: device pointer
 * @dir: data direction of resource to unmap
 * @sgt: sg table to free
 *
 * Free a previously allocated sg table.
 */
void amdgpu_vram_mgr_free_sgt(struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sgtable_sg(sgt, sg, i)
		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}

/**
 * amdgpu_vram_mgr_usage - how many bytes are used in this domain
 *
 * @man: TTM memory type manager
 *
 * Returns how many bytes are used in this domain.
 */
uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

	return atomic64_read(&mgr->usage);
}

/**
 * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
 *
 * @man: TTM memory type manager
 *
 * Returns how many bytes are used in the visible part of VRAM
 */
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

	return atomic64_read(&mgr->vis_usage);
}

/**
 * amdgpu_vram_mgr_debug - dump VRAM table
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the table content to the given DRM printer.
 */
static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
				  struct drm_printer *printer)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

	spin_lock(&mgr->lock);
	drm_mm_print(&mgr->mm, printer);
	spin_unlock(&mgr->lock);

	drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
		   man->size, amdgpu_vram_mgr_usage(man) >> 20,
		   amdgpu_vram_mgr_vis_usage(man) >> 20);
}

static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
	.alloc	= amdgpu_vram_mgr_new,
	.free	= amdgpu_vram_mgr_del,
	.debug	= amdgpu_vram_mgr_debug
};

/**
 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate and initialize the VRAM manager.
 */
int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
{
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;
	int ret;

	ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT);

	man->func = &amdgpu_vram_mgr_func;

	drm_mm_init(&mgr->mm, 0, man->size);
	spin_lock_init(&mgr->lock);
	INIT_LIST_HEAD(&mgr->reservations_pending);
	INIT_LIST_HEAD(&mgr->reserved_pages);

	/* Add the VRAM-related sysfs files */
	ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
	if (ret)
		DRM_ERROR("Failed to register sysfs\n");

	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
	ttm_resource_manager_set_used(man, true);
	return 0;
}

/**
 * amdgpu_vram_mgr_fini - free and destroy VRAM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Destroy and free the VRAM manager. All remaining allocations are evicted
 * first; if that fails the teardown is aborted.
 */
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
{
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;
	int ret;
	struct amdgpu_vram_reservation *rsv, *temp;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
	if (ret)
		return;

	spin_lock(&mgr->lock);
	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node)
		kfree(rsv);

	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) {
		drm_mm_remove_node(&rsv->mm_node);
		kfree(rsv);
	}
	drm_mm_takedown(&mgr->mm);
	spin_unlock(&mgr->lock);

	sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);

	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
}