/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/dma-mapping.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"

static inline struct amdgpu_vram_mgr *to_vram_mgr(struct ttm_resource_manager *man)
{
	return container_of(man, struct amdgpu_vram_mgr, manager);
}

static inline struct amdgpu_device *to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
{
	return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
}
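/*
 * Both helpers above rely on struct amdgpu_vram_mgr being embedded in
 * struct amdgpu_device (as mman.vram_mgr), so container_of() can walk
 * back up without a dedicated back-pointer. A minimal sketch of the
 * pattern, assuming a hypothetical caller that only has the manager:
 *
 *	static void example_dump(struct ttm_resource_manager *man)
 *	{
 *		struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 *		struct amdgpu_device *adev = to_amdgpu_device(mgr);
 *
 *		dev_info(adev->dev, "VRAM manager with %llu pages\n", man->size);
 *	}
 */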
/**
 * DOC: mem_info_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * VRAM available on the device.
 * The file mem_info_vram_total is used for this and returns the total
 * amount of VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
}

/**
 * DOC: mem_info_vis_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * CPU-visible VRAM available on the device.
 * The file mem_info_vis_vram_total is used for this and returns the total
 * amount of visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
}

/**
 * DOC: mem_info_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of VRAM
 * currently in use on the device.
 * The file mem_info_vram_used is used for this and returns the total
 * amount of currently used VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

	return snprintf(buf, PAGE_SIZE, "%llu\n",
			amdgpu_vram_mgr_usage(man));
}

/**
 * DOC: mem_info_vis_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of
 * CPU-visible VRAM currently in use.
 * The file mem_info_vis_vram_used is used for this and returns the total
 * amount of currently used visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

	return snprintf(buf, PAGE_SIZE, "%llu\n",
			amdgpu_vram_mgr_vis_usage(man));
}

/**
 * DOC: mem_info_vram_vendor
 *
 * The file mem_info_vram_vendor reports the VRAM vendor as a lower-case
 * string, or "unknown" if the vendor could not be determined.
 */
static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	switch (adev->gmc.vram_vendor) {
	case SAMSUNG:
		return snprintf(buf, PAGE_SIZE, "samsung\n");
	case INFINEON:
		return snprintf(buf, PAGE_SIZE, "infineon\n");
	case ELPIDA:
		return snprintf(buf, PAGE_SIZE, "elpida\n");
	case ETRON:
		return snprintf(buf, PAGE_SIZE, "etron\n");
	case NANYA:
		return snprintf(buf, PAGE_SIZE, "nanya\n");
	case HYNIX:
		return snprintf(buf, PAGE_SIZE, "hynix\n");
	case MOSEL:
		return snprintf(buf, PAGE_SIZE, "mosel\n");
	case WINBOND:
		return snprintf(buf, PAGE_SIZE, "winbond\n");
	case ESMT:
		return snprintf(buf, PAGE_SIZE, "esmt\n");
	case MICRON:
		return snprintf(buf, PAGE_SIZE, "micron\n");
	default:
		return snprintf(buf, PAGE_SIZE, "unknown\n");
	}
}

static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
		   amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
		   amdgpu_mem_info_vis_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
		   amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
		   amdgpu_mem_info_vis_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
		   amdgpu_mem_info_vram_vendor, NULL);

static const struct attribute *amdgpu_vram_mgr_attributes[] = {
	&dev_attr_mem_info_vram_total.attr,
	&dev_attr_mem_info_vis_vram_total.attr,
	&dev_attr_mem_info_vram_used.attr,
	&dev_attr_mem_info_vis_vram_used.attr,
	&dev_attr_mem_info_vram_vendor.attr,
	NULL
};

static const struct ttm_resource_manager_func amdgpu_vram_mgr_func;
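/*
 * A minimal userspace sketch (not part of the driver) reading two of the
 * attributes defined above; the card index in the path is an assumption
 * and depends on the system:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long long used, total;
 *		FILE *f = fopen("/sys/class/drm/card0/device/mem_info_vram_used", "r");
 *
 *		if (!f || fscanf(f, "%llu", &used) != 1)
 *			return 1;
 *		fclose(f);
 *		f = fopen("/sys/class/drm/card0/device/mem_info_vram_total", "r");
 *		if (!f || fscanf(f, "%llu", &total) != 1)
 *			return 1;
 *		fclose(f);
 *		printf("VRAM: %llu of %llu bytes used\n", used, total);
 *		return 0;
 *	}
 */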
/**
 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate and initialize the VRAM manager.
 */
int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
{
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;
	int ret;

	ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT);

	man->func = &amdgpu_vram_mgr_func;

	drm_mm_init(&mgr->mm, 0, man->size);
	spin_lock_init(&mgr->lock);
	INIT_LIST_HEAD(&mgr->reservations_pending);
	INIT_LIST_HEAD(&mgr->reserved_pages);

	/* Add the VRAM-related sysfs files */
	ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
	if (ret)
		DRM_ERROR("Failed to register sysfs\n");

	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
	ttm_resource_manager_set_used(man, true);
	return 0;
}

/**
 * amdgpu_vram_mgr_fini - free and destroy VRAM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Destroy and free the VRAM manager. Bails out early, leaving the manager
 * in place, if buffers allocated inside it cannot be evicted.
 */
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
{
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;
	int ret;
	struct amdgpu_vram_reservation *rsv, *temp;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
	if (ret)
		return;

	spin_lock(&mgr->lock);
	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node)
		kfree(rsv);

	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) {
		drm_mm_remove_node(&rsv->mm_node);
		kfree(rsv);
	}
	drm_mm_takedown(&mgr->mm);
	spin_unlock(&mgr->lock);

	sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);

	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
}

/**
 * amdgpu_vram_mgr_vis_size - Calculate visible node size
 *
 * @adev: amdgpu_device pointer
 * @node: MM node structure
 *
 * Calculate how many bytes of the MM node are inside visible VRAM
 */
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
				    struct drm_mm_node *node)
{
	uint64_t start = node->start << PAGE_SHIFT;
	uint64_t end = (node->size + node->start) << PAGE_SHIFT;

	if (start >= adev->gmc.visible_vram_size)
		return 0;

	return (end > adev->gmc.visible_vram_size ?
		adev->gmc.visible_vram_size : end) - start;
}
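/*
 * Worked example for amdgpu_vram_mgr_vis_size(), with illustrative
 * numbers: assume 4 KiB pages and a visible_vram_size of 256 MiB. A node
 * with start = 0x8000 (128 MiB) and size = 0x10000 pages (256 MiB) ends
 * at 384 MiB, past the visibility boundary, so the function clamps the
 * end to 256 MiB and returns 256 MiB - 128 MiB = 128 MiB rather than the
 * full node size.
 */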
/**
 * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
 *
 * @bo: &amdgpu_bo buffer object (must be in VRAM)
 *
 * Returns:
 * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
 */
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_resource *mem = &bo->tbo.mem;
	struct drm_mm_node *nodes = mem->mm_node;
	unsigned pages = mem->num_pages;
	u64 usage;

	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
		return amdgpu_bo_size(bo);

	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
		return 0;

	for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
		usage += amdgpu_vram_mgr_vis_size(adev, nodes);

	return usage;
}

static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_mm *mm = &mgr->mm;
	struct amdgpu_vram_reservation *rsv, *temp;
	uint64_t vis_usage;

	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) {
		if (drm_mm_reserve_node(mm, &rsv->mm_node))
			continue;

		dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
			rsv->mm_node.start, rsv->mm_node.size);

		vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
		atomic64_add(vis_usage, &mgr->vis_usage);
		atomic64_add(rsv->mm_node.size << PAGE_SHIFT, &mgr->usage);
		list_move(&rsv->node, &mgr->reserved_pages);
	}
}

/**
 * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
 *
 * @man: TTM memory type manager
 * @start: start address of the range in VRAM
 * @size: size of the range
 *
 * Reserve memory from start address with the specified size in VRAM
 */
int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
				  uint64_t start, uint64_t size)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_vram_reservation *rsv;

	rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
	if (!rsv)
		return -ENOMEM;

	INIT_LIST_HEAD(&rsv->node);
	rsv->mm_node.start = start >> PAGE_SHIFT;
	rsv->mm_node.size = size >> PAGE_SHIFT;

	spin_lock(&mgr->lock);
	/* Queue the reservation; note the (entry, head) argument order. */
	list_add_tail(&rsv->node, &mgr->reservations_pending);
	amdgpu_vram_mgr_do_reserve(man);
	spin_unlock(&mgr->lock);

	return 0;
}

/**
 * amdgpu_vram_mgr_query_page_status - query the reservation status
 *
 * @man: TTM memory type manager
 * @start: start address of a page in VRAM
 *
 * Returns:
 * -EBUSY: the page is still held in the pending list
 * 0: the page has been reserved
 * -ENOENT: the input page is not a reservation
 */
int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man,
				      uint64_t start)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_vram_reservation *rsv;
	int ret;

	spin_lock(&mgr->lock);

	list_for_each_entry(rsv, &mgr->reservations_pending, node) {
		if ((rsv->mm_node.start <= start) &&
		    (start < (rsv->mm_node.start + rsv->mm_node.size))) {
			ret = -EBUSY;
			goto out;
		}
	}

	list_for_each_entry(rsv, &mgr->reserved_pages, node) {
		if ((rsv->mm_node.start <= start) &&
		    (start < (rsv->mm_node.start + rsv->mm_node.size))) {
			ret = 0;
			goto out;
		}
	}

	ret = -ENOENT;
out:
	spin_unlock(&mgr->lock);
	return ret;
}
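/*
 * A minimal sketch, assuming a hypothetical caller such as bad-page
 * retirement, of how the two entry points above combine; the function
 * name is illustrative only. Note that the range is reserved with a byte
 * address while the status query takes a page number:
 *
 *	static int example_retire_page(struct amdgpu_device *adev, uint64_t addr)
 *	{
 *		struct ttm_resource_manager *man =
 *			ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 *		int r;
 *
 *		r = amdgpu_vram_mgr_reserve_range(man, addr, PAGE_SIZE);
 *		if (r)
 *			return r;
 *
 *		return amdgpu_vram_mgr_query_page_status(man, addr >> PAGE_SHIFT);
 *	}
 */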
/**
 * amdgpu_vram_mgr_virt_start - update virtual start address
 *
 * @mem: ttm_resource to update
 * @node: just allocated node
 *
 * Calculate a virtual BO start address to easily check if everything is CPU
 * accessible.
 */
static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
				       struct drm_mm_node *node)
{
	unsigned long start;

	start = node->start + node->size;
	if (start > mem->num_pages)
		start -= mem->num_pages;
	else
		start = 0;
	mem->start = max(mem->start, start);
}

/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate VRAM for the given BO.
 */
static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
			       struct ttm_buffer_object *tbo,
			       const struct ttm_place *place,
			       struct ttm_resource *mem)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_mm *mm = &mgr->mm;
	struct drm_mm_node *nodes;
	enum drm_mm_insert_mode mode;
	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
	uint64_t vis_usage = 0, mem_bytes, max_bytes;
	unsigned i;
	int r;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	max_bytes = adev->gmc.mc_vram_size;
	if (tbo->type != ttm_bo_type_kernel)
		max_bytes -= AMDGPU_VM_RESERVED_VRAM;

	/* bail out quickly if there's likely not enough VRAM for this BO */
	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
		atomic64_sub(mem_bytes, &mgr->usage);
		return -ENOSPC;
	}

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		pages_per_node = ~0ul;
		num_nodes = 1;
	} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pages_per_node = HPAGE_PMD_NR;
#else
		/* default to 2MB */
		pages_per_node = (2UL << (20UL - PAGE_SHIFT));
#endif
		pages_per_node = max((uint32_t)pages_per_node, mem->page_alignment);
		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
	}

	nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
			       GFP_KERNEL | __GFP_ZERO);
	if (!nodes) {
		atomic64_sub(mem_bytes, &mgr->usage);
		return -ENOMEM;
	}

	mode = DRM_MM_INSERT_BEST;
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	mem->start = 0;
	pages_left = mem->num_pages;

	spin_lock(&mgr->lock);
	for (i = 0; pages_left >= pages_per_node; ++i) {
		unsigned long pages = rounddown_pow_of_two(pages_left);

		/* Limit maximum size to 2GB due to SG table limitations */
		pages = min(pages, (2UL << (30 - PAGE_SHIFT)));

		r = drm_mm_insert_node_in_range(mm, &nodes[i], pages,
						pages_per_node, 0,
						place->fpfn, lpfn,
						mode);
		if (unlikely(r))
			break;

		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
		pages_left -= pages;
	}

	for (; pages_left; ++i) {
		unsigned long pages = min(pages_left, pages_per_node);
		uint32_t alignment = mem->page_alignment;

		if (pages == pages_per_node)
			alignment = pages_per_node;

		r = drm_mm_insert_node_in_range(mm, &nodes[i],
						pages, alignment, 0,
						place->fpfn, lpfn,
						mode);
		if (unlikely(r))
			goto error;

		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
		pages_left -= pages;
	}
	spin_unlock(&mgr->lock);

	atomic64_add(vis_usage, &mgr->vis_usage);

	mem->mm_node = nodes;

	return 0;

error:
	while (i--)
		drm_mm_remove_node(&nodes[i]);
	spin_unlock(&mgr->lock);
	atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);

	kvfree(nodes);
	return r;
}
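/*
 * Worked example of the split strategy above (illustrative numbers,
 * 4 KiB pages, pages_per_node = 512, i.e. 2 MiB): a 5 MiB request is
 * 1280 pages. The first loop inserts rounddown_pow_of_two(1280) = 1024
 * pages (4 MiB) aligned to pages_per_node, leaving 256 pages; that is
 * below pages_per_node, so the second loop places them as a single
 * 1 MiB node using the BO's own page_alignment.
 */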
/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @mem: TTM memory object
 *
 * Free the allocated VRAM again.
 */
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
				struct ttm_resource *mem)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_mm_node *nodes = mem->mm_node;
	uint64_t usage = 0, vis_usage = 0;
	unsigned pages = mem->num_pages;

	if (!mem->mm_node)
		return;

	spin_lock(&mgr->lock);
	while (pages) {
		pages -= nodes->size;
		drm_mm_remove_node(nodes);
		usage += nodes->size << PAGE_SHIFT;
		vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
		++nodes;
	}
	amdgpu_vram_mgr_do_reserve(man);
	spin_unlock(&mgr->lock);

	atomic64_sub(usage, &mgr->usage);
	atomic64_sub(vis_usage, &mgr->vis_usage);

	kvfree(mem->mm_node);
	mem->mm_node = NULL;
}

/**
 * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
 *
 * @adev: amdgpu device pointer
 * @mem: TTM memory object
 * @dev: the other device
 * @dir: dma direction
 * @sgt: resulting sg table
 *
 * Allocate and fill a sg table from a VRAM allocation.
 */
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
			      struct ttm_resource *mem,
			      struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table **sgt)
{
	struct drm_mm_node *node;
	struct scatterlist *sg;
	int num_entries = 0;
	unsigned int pages;
	int i, r;

	*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
	if (!*sgt)
		return -ENOMEM;

	/* Determine the number of DRM MM nodes backing this resource */
	for (pages = mem->num_pages, node = mem->mm_node;
	     pages; pages -= node->size, ++node)
		++num_entries;

	r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
	if (r)
		goto error_free;

	/* Mark all entries as unmapped so the error path can tell them apart */
	for_each_sgtable_sg((*sgt), sg, i)
		sg->length = 0;

	node = mem->mm_node;
	for_each_sgtable_sg((*sgt), sg, i) {
		phys_addr_t phys = (node->start << PAGE_SHIFT) +
			adev->gmc.aper_base;
		size_t size = node->size << PAGE_SHIFT;
		dma_addr_t addr;

		++node;
		addr = dma_map_resource(dev, phys, size, dir,
					DMA_ATTR_SKIP_CPU_SYNC);
		r = dma_mapping_error(dev, addr);
		if (r)
			goto error_unmap;

		sg_set_page(sg, NULL, size, 0);
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = size;
	}
	return 0;

error_unmap:
	for_each_sgtable_sg((*sgt), sg, i) {
		if (!sg->length)
			continue;

		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	}
	sg_free_table(*sgt);

error_free:
	kfree(*sgt);
	return r;
}
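/*
 * A minimal usage sketch, assuming a hypothetical peer-to-peer importer;
 * "peer_dev" and the surrounding context are illustrative only:
 *
 *	struct sg_table *sgt;
 *	int r;
 *
 *	r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, peer_dev,
 *				      DMA_BIDIRECTIONAL, &sgt);
 *	if (r)
 *		return r;
 *	... let the peer DMA to/from the mapped VRAM ranges ...
 *	amdgpu_vram_mgr_free_sgt(adev, peer_dev, DMA_BIDIRECTIONAL, sgt);
 */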
/**
 * amdgpu_vram_mgr_free_sgt - unmap and free a sg table
 *
 * @adev: amdgpu device pointer
 * @dev: device pointer
 * @dir: data direction of resource to unmap
 * @sgt: sg table to free
 *
 * Free a previously allocated sg table and unmap its DMA mappings.
 */
void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
			      struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sgtable_sg(sgt, sg, i)
		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}

/**
 * amdgpu_vram_mgr_usage - how many bytes are used in this domain
 *
 * @man: TTM memory type manager
 *
 * Returns how many bytes are used in this domain.
 */
uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

	return atomic64_read(&mgr->usage);
}

/**
 * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
 *
 * @man: TTM memory type manager
 *
 * Returns how many bytes are used in the visible part of VRAM
 */
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

	return atomic64_read(&mgr->vis_usage);
}

/**
 * amdgpu_vram_mgr_debug - dump VRAM table
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the table content using the provided DRM printer.
 */
static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
				  struct drm_printer *printer)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

	spin_lock(&mgr->lock);
	drm_mm_print(&mgr->mm, printer);
	spin_unlock(&mgr->lock);

	drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
		   man->size, amdgpu_vram_mgr_usage(man) >> 20,
		   amdgpu_vram_mgr_vis_usage(man) >> 20);
}

static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
	.alloc	= amdgpu_vram_mgr_new,
	.free	= amdgpu_vram_mgr_del,
	.debug	= amdgpu_vram_mgr_debug
};