/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/iosys-map.h>
#include <linux/io-mapping.h>
#include <linux/scatterlist.h>

#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_bo_driver.h>

/**
 * ttm_lru_bulk_move_init - initialize a bulk move structure
 * @bulk: the structure to init
 *
 * For now just memset the structure to zero.
 */
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
	memset(bulk, 0, sizeof(*bulk));
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);
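/*
 * Example (editor's sketch, not part of the original file): a driver that
 * wants all BOs of a VM to stay together on the LRU would typically embed a
 * struct ttm_lru_bulk_move in its VM object, attach it to the VM's BOs (e.g.
 * with ttm_bo_set_bulk_move()) and re-bulk-move after command submission,
 * roughly like this:
 *
 *	struct ttm_lru_bulk_move bulk;
 *
 *	ttm_lru_bulk_move_init(&bulk);
 *	... attach the bulk move to the VM's BOs ...
 *	spin_lock(&bdev->lru_lock);
 *	ttm_lru_bulk_move_tail(&bulk);
 *	spin_unlock(&bdev->lru_lock);
 */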
/**
 * ttm_lru_bulk_move_tail - bulk move a range of resources to the LRU tail.
 *
 * @bulk: bulk move structure
 *
 * Bulk move BOs to the LRU tail, only valid to use when the driver makes sure
 * that the resource order never changes. Should be called with
 * &ttm_device.lru_lock held.
 */
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
	unsigned i, j;

	for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
			struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
			struct ttm_resource_manager *man;

			if (!pos->first)
				continue;

			lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
			dma_resv_assert_held(pos->first->bo->base.resv);
			dma_resv_assert_held(pos->last->bo->base.resv);

			man = ttm_manager_type(pos->first->bo->bdev, i);
			list_bulk_move_tail(&man->lru[j], &pos->first->lru,
					    &pos->last->lru);
		}
	}
}
EXPORT_SYMBOL(ttm_lru_bulk_move_tail);

/* Return the bulk move pos object for this resource */
static struct ttm_lru_bulk_move_pos *
ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
{
	return &bulk->pos[res->mem_type][res->bo->priority];
}

/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
				       struct ttm_resource *res)
{
	if (pos->last != res) {
		list_move(&res->lru, &pos->last->lru);
		pos->last = res;
	}
}

/* Add the resource to a bulk_move cursor */
static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (!pos->first) {
		pos->first = res;
		pos->last = res;
	} else {
		ttm_lru_bulk_move_pos_tail(pos, res);
	}
}

/* Remove the resource from a bulk_move range */
static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (unlikely(pos->first == res && pos->last == res)) {
		pos->first = NULL;
		pos->last = NULL;
	} else if (pos->first == res) {
		pos->first = list_next_entry(res, lru);
	} else if (pos->last == res) {
		pos->last = list_prev_entry(res, lru);
	} else {
		list_move(&res->lru, &pos->last->lru);
	}
}

/* Add the resource to a bulk move if the BO is configured for it */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !bo->pin_count)
		ttm_lru_bulk_move_add(bo->bulk_move, res);
}

/* Remove the resource from a bulk move if the BO is configured for it */
void ttm_resource_del_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !bo->pin_count)
		ttm_lru_bulk_move_del(bo->bulk_move, res);
}

/* Move a resource to the LRU or bulk tail */
void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;

	lockdep_assert_held(&bo->bdev->lru_lock);

	if (bo->pin_count) {
		list_move_tail(&res->lru, &bdev->pinned);

	} else if (bo->bulk_move) {
		struct ttm_lru_bulk_move_pos *pos =
			ttm_lru_bulk_move_pos(bo->bulk_move, res);

		ttm_lru_bulk_move_pos_tail(pos, res);
	} else {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, res->mem_type);
		list_move_tail(&res->lru, &man->lru[bo->priority]);
	}
}
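/*
 * Example (editor's sketch, not part of the original file): a driver marking
 * a BO as recently used bumps its resource to the LRU tail under the device's
 * LRU lock, e.g. from its command submission path:
 *
 *	spin_lock(&bo->bdev->lru_lock);
 *	ttm_resource_move_to_lru_tail(bo->resource);
 *	spin_unlock(&bo->bdev->lru_lock);
 */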
/**
 * ttm_resource_init - resource object constructor
 * @bo: buffer object this resource is allocated for
 * @place: placement of the resource
 * @res: the resource object to initialize
 *
 * Initialize a new resource object. Counterpart of ttm_resource_fini().
 */
void ttm_resource_init(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource *res)
{
	struct ttm_resource_manager *man;

	res->start = 0;
	res->num_pages = PFN_UP(bo->base.size);
	res->mem_type = place->mem_type;
	res->placement = place->flags;
	res->bus.addr = NULL;
	res->bus.offset = 0;
	res->bus.is_iomem = false;
	res->bus.caching = ttm_cached;
	res->bo = bo;

	man = ttm_manager_type(bo->bdev, place->mem_type);
	spin_lock(&bo->bdev->lru_lock);
	if (bo->pin_count)
		list_add_tail(&res->lru, &bo->bdev->pinned);
	else
		list_add_tail(&res->lru, &man->lru[bo->priority]);
	man->usage += res->num_pages << PAGE_SHIFT;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);

/**
 * ttm_resource_fini - resource destructor
 * @man: the resource manager this resource belongs to
 * @res: the resource to clean up
 *
 * Should be used by resource manager backends to clean up the TTM resource
 * objects before freeing the underlying structure. Makes sure the resource is
 * removed from the LRU before destruction.
 * Counterpart of ttm_resource_init().
 */
void ttm_resource_fini(struct ttm_resource_manager *man,
		       struct ttm_resource *res)
{
	struct ttm_device *bdev = man->bdev;

	spin_lock(&bdev->lru_lock);
	list_del_init(&res->lru);
	man->usage -= res->num_pages << PAGE_SHIFT;
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);

int ttm_resource_alloc(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource **res_ptr)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, place->mem_type);
	int ret;

	ret = man->func->alloc(man, bo, place, res_ptr);
	if (ret)
		return ret;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_add_bulk_move(*res_ptr, bo);
	spin_unlock(&bo->bdev->lru_lock);
	return 0;
}

void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
	struct ttm_resource_manager *man;

	if (!*res)
		return;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_del_bulk_move(*res, bo);
	spin_unlock(&bo->bdev->lru_lock);
	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
	man->func->free(man, *res);
	*res = NULL;
}
EXPORT_SYMBOL(ttm_resource_free);

static bool ttm_resource_places_compat(struct ttm_resource *res,
				       const struct ttm_place *places,
				       unsigned num_placement)
{
	unsigned i;

	if (res->placement & TTM_PL_FLAG_TEMPORARY)
		return false;

	for (i = 0; i < num_placement; i++) {
		const struct ttm_place *heap = &places[i];

		if (res->start < heap->fpfn || (heap->lpfn &&
		    (res->start + res->num_pages) > heap->lpfn))
			continue;

		if ((res->mem_type == heap->mem_type) &&
		    (!(heap->flags & TTM_PL_FLAG_CONTIGUOUS) ||
		     (res->placement & TTM_PL_FLAG_CONTIGUOUS)))
			return true;
	}
	return false;
}
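/*
 * Example (editor's sketch, assuming a hypothetical backend): a minimal
 * resource manager backend pairs ttm_resource_init() and ttm_resource_fini()
 * around its own bookkeeping, along the lines of the system memory manager:
 *
 *	static int my_man_alloc(struct ttm_resource_manager *man,
 *				struct ttm_buffer_object *bo,
 *				const struct ttm_place *place,
 *				struct ttm_resource **res)
 *	{
 *		*res = kzalloc(sizeof(**res), GFP_KERNEL);
 *		if (!*res)
 *			return -ENOMEM;
 *
 *		ttm_resource_init(bo, place, *res);
 *		return 0;
 *	}
 *
 *	static void my_man_free(struct ttm_resource_manager *man,
 *				struct ttm_resource *res)
 *	{
 *		ttm_resource_fini(man, res);
 *		kfree(res);
 *	}
 */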
/**
 * ttm_resource_compat - check if resource is compatible with placement
 *
 * @res: the resource to check
 * @placement: the placement to check against
 *
 * Returns true if the placement is compatible.
 */
bool ttm_resource_compat(struct ttm_resource *res,
			 struct ttm_placement *placement)
{
	if (ttm_resource_places_compat(res, placement->placement,
				       placement->num_placement))
		return true;

	if ((placement->busy_placement != placement->placement ||
	     placement->num_busy_placement > placement->num_placement) &&
	    ttm_resource_places_compat(res, placement->busy_placement,
				       placement->num_busy_placement))
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_resource_compat);

void ttm_resource_set_bo(struct ttm_resource *res,
			 struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	res->bo = bo;
	spin_unlock(&bo->bdev->lru_lock);
}

/**
 * ttm_resource_manager_init
 *
 * @man: memory manager object to init
 * @bdev: ttm device this manager belongs to
 * @size: size of managed resources in arbitrary units
 *
 * Initialise core parts of a manager object.
 */
void ttm_resource_manager_init(struct ttm_resource_manager *man,
			       struct ttm_device *bdev,
			       uint64_t size)
{
	unsigned i;

	spin_lock_init(&man->move_lock);
	man->bdev = bdev;
	man->size = size;
	man->usage = 0;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);

/**
 * ttm_resource_manager_evict_all
 *
 * @bdev: device to use
 * @man: manager to use
 *
 * Evict all the objects out of a memory manager until it is empty.
 * Part of memory manager cleanup sequence.
 */
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
				   struct ttm_resource_manager *man)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.force_alloc = true
	};
	struct dma_fence *fence;
	int ret;
	unsigned i;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&bdev->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		while (!list_empty(&man->lru[i])) {
			spin_unlock(&bdev->lru_lock);
			ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
						  NULL);
			if (ret)
				return ret;
			spin_lock(&bdev->lru_lock);
		}
	}
	spin_unlock(&bdev->lru_lock);

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);

/**
 * ttm_resource_manager_usage
 *
 * @man: A memory manager object.
 *
 * Return how many resources are currently used.
 */
uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
{
	uint64_t usage;

	spin_lock(&man->bdev->lru_lock);
	usage = man->usage;
	spin_unlock(&man->bdev->lru_lock);
	return usage;
}
EXPORT_SYMBOL(ttm_resource_manager_usage);
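/*
 * Example (editor's sketch, not part of the original file): a driver
 * typically initializes a manager, publishes it with ttm_set_driver_manager()
 * and enables it, then drains it again on teardown:
 *
 *	ttm_resource_manager_init(man, bdev, size);
 *	ttm_set_driver_manager(bdev, TTM_PL_VRAM, man);
 *	ttm_resource_manager_set_used(man, true);
 *	...
 *	ttm_resource_manager_set_used(man, false);
 *	ttm_resource_manager_evict_all(bdev, man);
 */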
/**
 * ttm_resource_manager_debug
 *
 * @man: manager type to dump.
 * @p: printer to use for debug.
 */
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
				struct drm_printer *p)
{
	drm_printf(p, "  use_type: %d\n", man->use_type);
	drm_printf(p, "  use_tt: %d\n", man->use_tt);
	drm_printf(p, "  size: %llu\n", man->size);
	drm_printf(p, "  usage: %llu\n", ttm_resource_manager_usage(man));
	if (man->func->debug)
		man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);

/**
 * ttm_resource_manager_first
 *
 * @man: resource manager to iterate over
 * @cursor: cursor to record the position
 *
 * Returns the first resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
			   struct ttm_resource_cursor *cursor)
{
	struct ttm_resource *res;

	lockdep_assert_held(&man->bdev->lru_lock);

	for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
	     ++cursor->priority)
		list_for_each_entry(res, &man->lru[cursor->priority], lru)
			return res;

	return NULL;
}

/**
 * ttm_resource_manager_next
 *
 * @man: resource manager to iterate over
 * @cursor: cursor to record the position
 * @res: the current resource pointer
 *
 * Returns the next resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_next(struct ttm_resource_manager *man,
			  struct ttm_resource_cursor *cursor,
			  struct ttm_resource *res)
{
	lockdep_assert_held(&man->bdev->lru_lock);

	list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
		return res;

	for (++cursor->priority; cursor->priority < TTM_MAX_BO_PRIORITY;
	     ++cursor->priority)
		list_for_each_entry(res, &man->lru[cursor->priority], lru)
			return res;

	return NULL;
}

static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
					  struct iosys_map *dmap,
					  pgoff_t i)
{
	struct ttm_kmap_iter_iomap *iter_io =
		container_of(iter, typeof(*iter_io), base);
	void __iomem *addr;

retry:
	while (i >= iter_io->cache.end) {
		iter_io->cache.sg = iter_io->cache.sg ?
			sg_next(iter_io->cache.sg) : iter_io->st->sgl;
		iter_io->cache.i = iter_io->cache.end;
		iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
			PAGE_SHIFT;
		iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
			iter_io->start;
	}

	if (i < iter_io->cache.i) {
		iter_io->cache.end = 0;
		iter_io->cache.sg = NULL;
		goto retry;
	}

	addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
				       (((resource_size_t)i - iter_io->cache.i)
					<< PAGE_SHIFT));
	iosys_map_set_vaddr_iomem(dmap, addr);
}

static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
					    struct iosys_map *map)
{
	io_mapping_unmap_local(map->vaddr_iomem);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
	.map_local = ttm_kmap_iter_iomap_map_local,
	.unmap_local = ttm_kmap_iter_iomap_unmap_local,
	.maps_tt = false,
};
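/*
 * Example (editor's sketch, not part of the original file): walking a
 * manager's LRU lists with the cursor API above, with the LRU lock held for
 * the whole traversal:
 *
 *	struct ttm_resource_cursor cursor;
 *	struct ttm_resource *res;
 *
 *	spin_lock(&bdev->lru_lock);
 *	for (res = ttm_resource_manager_first(man, &cursor); res;
 *	     res = ttm_resource_manager_next(man, &cursor, res)) {
 *		... inspect res, do not drop the lock while iterating ...
 *	}
 *	spin_unlock(&bdev->lru_lock);
 */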
/**
 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
 * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
 * @iomap: The struct io_mapping representing the underlying linear io_memory.
 * @st: sg_table into @iomap, representing the memory of the struct
 * ttm_resource.
 * @start: Offset that needs to be subtracted from @st to make
 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
			 struct io_mapping *iomap,
			 struct sg_table *st,
			 resource_size_t start)
{
	iter_io->base.ops = &ttm_kmap_iter_io_ops;
	iter_io->iomap = iomap;
	iter_io->st = st;
	iter_io->start = start;
	memset(&iter_io->cache, 0, sizeof(iter_io->cache));

	return &iter_io->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);

/**
 * DOC: Linear io iterator
 *
 * This code should die in the not too near future. Best would be if we could
 * make io-mapping use memremap for all io memory, and have memremap
 * implement a kmap_local functionality. We could then strip a huge amount of
 * code. These linear io iterators are implemented to mimic old functionality,
 * and they don't use kmap_local semantics at all internally. Rather they use
 * ioremap or friends, which at least on 32-bit adds global TLB flushes and
 * points of failure.
 */

static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
					      struct iosys_map *dmap,
					      pgoff_t i)
{
	struct ttm_kmap_iter_linear_io *iter_io =
		container_of(iter, typeof(*iter_io), base);

	*dmap = iter_io->dmap;
	iosys_map_incr(dmap, i * PAGE_SIZE);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
	.map_local = ttm_kmap_iter_linear_io_map_local,
	.maps_tt = false,
};
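/*
 * Example (editor's sketch, not part of the original file): consumers such as
 * the memcpy move helpers drive a kmap iterator one PAGE_SIZE chunk at a
 * time, roughly like this:
 *
 *	struct iosys_map map;
 *
 *	iter->ops->map_local(iter, &map, i);
 *	... access one page through &map ...
 *	if (iter->ops->unmap_local)
 *		iter->ops->unmap_local(iter, &map);
 */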
/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It sets up a memcpy kmap
 * iterator pointing at a linear chunk of io memory.
 *
 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
 * failure.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	int ret;

	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret)
		goto out_err;
	if (!mem->bus.is_iomem) {
		ret = -EINVAL;
		goto out_io_free;
	}

	if (mem->bus.addr) {
		iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
		iter_io->needs_unmap = false;
	} else {
		size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

		iter_io->needs_unmap = true;
		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
		if (mem->bus.caching == ttm_write_combined)
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap_wc(mem->bus.offset,
							     bus_size));
		else if (mem->bus.caching == ttm_cached)
			iosys_map_set_vaddr(&iter_io->dmap,
					    memremap(mem->bus.offset, bus_size,
						     MEMREMAP_WB |
						     MEMREMAP_WT |
						     MEMREMAP_WC));

		/* If uncached requested or if mapping cached or wc failed */
		if (iosys_map_is_null(&iter_io->dmap))
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap(mem->bus.offset,
							  bus_size));

		if (iosys_map_is_null(&iter_io->dmap)) {
			ret = -ENOMEM;
			goto out_io_free;
		}
	}

	iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
	return &iter_io->base;

out_io_free:
	ttm_mem_io_free(bdev, mem);
out_err:
	return ERR_PTR(ret);
}

/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to clean up
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It cleans up a memcpy kmap
 * iterator initialized by ttm_kmap_iter_linear_io_init.
 */
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
		if (iter_io->dmap.is_iomem)
			iounmap(iter_io->dmap.vaddr_iomem);
		else
			memunmap(iter_io->dmap.vaddr);
	}

	ttm_mem_io_free(bdev, mem);
}

#if defined(CONFIG_DEBUG_FS)

static int ttm_resource_manager_show(struct seq_file *m, void *unused)
{
	struct ttm_resource_manager *man =
		(struct ttm_resource_manager *)m->private;
	struct drm_printer p = drm_seq_file_printer(m);
	ttm_resource_manager_debug(man, &p);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);

#endif

/**
 * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
 * resource manager.
 * @man: The TTM resource manager for which the debugfs stats file will be
 * created
 * @parent: debugfs directory in which the file will reside
 * @name: The filename to create.
 *
 * This function sets up a debugfs file that can be used to look
 * at debug statistics of the specified ttm_resource_manager.
 */
void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
					 struct dentry *parent,
					 const char *name)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
#endif
}
EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);
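/*
 * Example (editor's sketch, not part of the original file): a driver would
 * typically wire this up from its debugfs init hook, e.g. for its VRAM
 * manager:
 *
 *	ttm_resource_manager_create_debugfs(ttm_manager_type(bdev, TTM_PL_VRAM),
 *					    minor->debugfs_root, "ttm_vram_mm");
 */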