/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/iosys-map.h>
#include <linux/io-mapping.h>
#include <linux/scatterlist.h>

#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_bo_driver.h>

/**
 * ttm_lru_bulk_move_init - initialize a bulk move structure
 * @bulk: the structure to init
 *
 * For now just memset the structure to zero.
 */
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
        memset(bulk, 0, sizeof(*bulk));
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);

/**
 * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
 *
 * @bulk: bulk move structure
 *
 * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
 * resource order never changes. Should be called with &ttm_device.lru_lock held.
 */
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
        unsigned i, j;

        for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
                for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
                        struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
                        struct ttm_resource_manager *man;

                        if (!pos->first)
                                continue;

                        lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
                        dma_resv_assert_held(pos->first->bo->base.resv);
                        dma_resv_assert_held(pos->last->bo->base.resv);

                        man = ttm_manager_type(pos->first->bo->bdev, i);
                        list_bulk_move_tail(&man->lru[j], &pos->first->lru,
                                            &pos->last->lru);
                }
        }
}
EXPORT_SYMBOL(ttm_lru_bulk_move_tail);

/* Return the bulk move pos object for this resource */
static struct ttm_lru_bulk_move_pos *
ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
{
        return &bulk->pos[res->mem_type][res->bo->priority];
}

/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
                                       struct ttm_resource *res)
{
        if (pos->last != res) {
                list_move(&res->lru, &pos->last->lru);
                pos->last = res;
        }
}

/* Add the resource to a bulk_move cursor */
void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
                           struct ttm_resource *res)
{
        struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

        if (!pos->first) {
                pos->first = res;
                pos->last = res;
        } else {
                ttm_lru_bulk_move_pos_tail(pos, res);
        }
}

/* Remove the resource from a bulk_move range */
void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
                           struct ttm_resource *res)
{
        struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

        if (unlikely(pos->first == res && pos->last == res)) {
                pos->first = NULL;
                pos->last = NULL;
        } else if (pos->first == res) {
                pos->first = list_next_entry(res, lru);
        } else if (pos->last == res) {
                pos->last = list_prev_entry(res, lru);
        } else {
                list_move(&res->lru, &pos->last->lru);
        }
}

/* Move a resource to the LRU or bulk tail */
void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
{
        struct ttm_buffer_object *bo = res->bo;
        struct ttm_device *bdev = bo->bdev;

        lockdep_assert_held(&bo->bdev->lru_lock);

        if (bo->pin_count) {
                list_move_tail(&res->lru, &bdev->pinned);

        } else if (bo->bulk_move) {
                struct ttm_lru_bulk_move_pos *pos =
                        ttm_lru_bulk_move_pos(bo->bulk_move, res);

                ttm_lru_bulk_move_pos_tail(pos, res);
        } else {
                struct ttm_resource_manager *man;

                man = ttm_manager_type(bdev, res->mem_type);
                list_move_tail(&res->lru, &man->lru[bo->priority]);
        }
}

/**
 * ttm_resource_init - resource object constructor
 * @bo: buffer object this resource is allocated for
 * @place: placement of the resource
 * @res: the resource object to initialize
 *
 * Initialize a new resource object. Counterpart of ttm_resource_fini().
 */
void ttm_resource_init(struct ttm_buffer_object *bo,
                       const struct ttm_place *place,
                       struct ttm_resource *res)
{
        struct ttm_resource_manager *man;

        res->start = 0;
        res->num_pages = PFN_UP(bo->base.size);
        res->mem_type = place->mem_type;
        res->placement = place->flags;
        res->bus.addr = NULL;
        res->bus.offset = 0;
        res->bus.is_iomem = false;
        res->bus.caching = ttm_cached;
        res->bo = bo;
        INIT_LIST_HEAD(&res->lru);

        man = ttm_manager_type(bo->bdev, place->mem_type);
        spin_lock(&bo->bdev->lru_lock);
        man->usage += res->num_pages << PAGE_SHIFT;
        if (bo->bulk_move)
                ttm_lru_bulk_move_add(bo->bulk_move, res);
        else
                ttm_resource_move_to_lru_tail(res);
        spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);

/**
 * ttm_resource_fini - resource destructor
 * @man: the resource manager this resource belongs to
 * @res: the resource to clean up
 *
 * Should be used by resource manager backends to clean up the TTM resource
 * objects before freeing the underlying structure. Makes sure the resource is
 * removed from the LRU before destruction.
 * Counterpart of ttm_resource_init().
 */
void ttm_resource_fini(struct ttm_resource_manager *man,
                       struct ttm_resource *res)
{
        struct ttm_device *bdev = man->bdev;

        spin_lock(&bdev->lru_lock);
        list_del_init(&res->lru);
        man->usage -= res->num_pages << PAGE_SHIFT;
        spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);

/* Allocate a new resource for @bo from the manager selected by @place */
int ttm_resource_alloc(struct ttm_buffer_object *bo,
                       const struct ttm_place *place,
                       struct ttm_resource **res_ptr)
{
        struct ttm_resource_manager *man =
                ttm_manager_type(bo->bdev, place->mem_type);

        return man->func->alloc(man, bo, place, res_ptr);
}

/* Free the resource, dropping it from the BO's bulk move range first if needed */
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
        struct ttm_resource_manager *man;

        if (!*res)
                return;

        if (bo->bulk_move) {
                spin_lock(&bo->bdev->lru_lock);
                ttm_lru_bulk_move_del(bo->bulk_move, *res);
                spin_unlock(&bo->bdev->lru_lock);
        }

        man = ttm_manager_type(bo->bdev, (*res)->mem_type);
        man->func->free(man, *res);
        *res = NULL;
}
EXPORT_SYMBOL(ttm_resource_free);

static bool ttm_resource_places_compat(struct ttm_resource *res,
                                       const struct ttm_place *places,
                                       unsigned num_placement)
{
        unsigned i;

        if (res->placement & TTM_PL_FLAG_TEMPORARY)
                return false;

        for (i = 0; i < num_placement; i++) {
                const struct ttm_place *heap = &places[i];

                if (res->start < heap->fpfn || (heap->lpfn &&
                    (res->start + res->num_pages) > heap->lpfn))
                        continue;

                if ((res->mem_type == heap->mem_type) &&
                    (!(heap->flags & TTM_PL_FLAG_CONTIGUOUS) ||
                     (res->placement & TTM_PL_FLAG_CONTIGUOUS)))
                        return true;
        }
        return false;
}

/**
 * ttm_resource_compat - check if resource is compatible with placement
 *
 * @res: the resource to check
 * @placement: the placement to check against
 *
 * Returns true if the placement is compatible.
 */
bool ttm_resource_compat(struct ttm_resource *res,
                         struct ttm_placement *placement)
{
        if (ttm_resource_places_compat(res, placement->placement,
                                       placement->num_placement))
                return true;

        if ((placement->busy_placement != placement->placement ||
             placement->num_busy_placement > placement->num_placement) &&
            ttm_resource_places_compat(res, placement->busy_placement,
                                       placement->num_busy_placement))
                return true;

        return false;
}
EXPORT_SYMBOL(ttm_resource_compat);

/* Update the resource's buffer object pointer under the LRU lock */
void ttm_resource_set_bo(struct ttm_resource *res,
                         struct ttm_buffer_object *bo)
{
        spin_lock(&bo->bdev->lru_lock);
        res->bo = bo;
        spin_unlock(&bo->bdev->lru_lock);
}

/**
 * ttm_resource_manager_init
 *
 * @man: memory manager object to init
 * @bdev: ttm device this manager belongs to
 * @size: size of managed resources in arbitrary units
 *
 * Initialise core parts of a manager object.
 */
void ttm_resource_manager_init(struct ttm_resource_manager *man,
                               struct ttm_device *bdev,
                               uint64_t size)
{
        unsigned i;

        spin_lock_init(&man->move_lock);
        man->bdev = bdev;
        man->size = size;
        man->usage = 0;

        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
                INIT_LIST_HEAD(&man->lru[i]);
        man->move = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);

/*
 * ttm_resource_manager_evict_all
 *
 * @bdev - device to use
 * @man - manager to use
 *
 * Evict all the objects out of a memory manager until it is empty.
 * Part of memory manager cleanup sequence.
 */
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
                                   struct ttm_resource_manager *man)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false,
                .force_alloc = true
        };
        struct dma_fence *fence;
        int ret;
        unsigned i;

        /*
         * Can't use standard list traversal since we're unlocking.
         */

        spin_lock(&bdev->lru_lock);
        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                while (!list_empty(&man->lru[i])) {
                        spin_unlock(&bdev->lru_lock);
                        ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
                                                  NULL);
                        if (ret)
                                return ret;
                        spin_lock(&bdev->lru_lock);
                }
        }
        spin_unlock(&bdev->lru_lock);

        spin_lock(&man->move_lock);
        fence = dma_fence_get(man->move);
        spin_unlock(&man->move_lock);

        if (fence) {
                ret = dma_fence_wait(fence, false);
                dma_fence_put(fence);
                if (ret)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);

/**
 * ttm_resource_manager_usage
 *
 * @man: A memory manager object.
 *
 * Return how much of the managed resources is currently used, in bytes.
 */
uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
{
        uint64_t usage;

        spin_lock(&man->bdev->lru_lock);
        usage = man->usage;
        spin_unlock(&man->bdev->lru_lock);
        return usage;
}
EXPORT_SYMBOL(ttm_resource_manager_usage);

/**
 * ttm_resource_manager_debug
 *
 * @man: manager type to dump.
 * @p: printer to use for debug.
 */
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
                                struct drm_printer *p)
{
        drm_printf(p, "  use_type: %d\n", man->use_type);
        drm_printf(p, "  use_tt: %d\n", man->use_tt);
        drm_printf(p, "  size: %llu\n", man->size);
        drm_printf(p, "  usage: %llu\n", ttm_resource_manager_usage(man));
        if (man->func->debug)
                man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);

/**
 * ttm_resource_manager_first
 *
 * @man: resource manager to iterate over
 * @cursor: cursor to record the position
 *
 * Returns the first resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
                           struct ttm_resource_cursor *cursor)
{
        struct ttm_resource *res;

        lockdep_assert_held(&man->bdev->lru_lock);

        for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
             ++cursor->priority)
                list_for_each_entry(res, &man->lru[cursor->priority], lru)
                        return res;

        return NULL;
}

/**
 * ttm_resource_manager_next
 *
 * @man: resource manager to iterate over
 * @cursor: cursor to record the position
 * @res: the current resource pointer
 *
 * Returns the next resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_next(struct ttm_resource_manager *man,
                          struct ttm_resource_cursor *cursor,
                          struct ttm_resource *res)
{
        lockdep_assert_held(&man->bdev->lru_lock);

        list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
                return res;

        for (++cursor->priority; cursor->priority < TTM_MAX_BO_PRIORITY;
             ++cursor->priority)
                list_for_each_entry(res, &man->lru[cursor->priority], lru)
                        return res;

        return NULL;
}

static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
                                          struct iosys_map *dmap,
                                          pgoff_t i)
{
        struct ttm_kmap_iter_iomap *iter_io =
                container_of(iter, typeof(*iter_io), base);
        void __iomem *addr;

retry:
        while (i >= iter_io->cache.end) {
                iter_io->cache.sg = iter_io->cache.sg ?
                        sg_next(iter_io->cache.sg) : iter_io->st->sgl;
                iter_io->cache.i = iter_io->cache.end;
                iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
                        PAGE_SHIFT;
                iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
                        iter_io->start;
        }

        if (i < iter_io->cache.i) {
                iter_io->cache.end = 0;
                iter_io->cache.sg = NULL;
                goto retry;
        }

        addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
                                       (((resource_size_t)i - iter_io->cache.i)
                                        << PAGE_SHIFT));
        iosys_map_set_vaddr_iomem(dmap, addr);
}

static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
                                            struct iosys_map *map)
{
        io_mapping_unmap_local(map->vaddr_iomem);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
        .map_local = ttm_kmap_iter_iomap_map_local,
        .unmap_local = ttm_kmap_iter_iomap_unmap_local,
        .maps_tt = false,
};

/**
 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
 * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
 * @iomap: The struct io_mapping representing the underlying linear io_memory.
 * @st: sg_table into @iomap, representing the memory of the struct
 * ttm_resource.
 * @start: Offset that needs to be subtracted from @st to make
 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
                         struct io_mapping *iomap,
                         struct sg_table *st,
                         resource_size_t start)
{
        iter_io->base.ops = &ttm_kmap_iter_io_ops;
        iter_io->iomap = iomap;
        iter_io->st = st;
        iter_io->start = start;
        memset(&iter_io->cache, 0, sizeof(iter_io->cache));

        return &iter_io->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);

/**
 * DOC: Linear io iterator
 *
 * This code should die in the not too near future. It would be best if we
 * could make io-mapping use memremap for all io memory, and have memremap
 * implement a kmap_local functionality. We could then strip a huge amount of
 * code. These linear io iterators are implemented to mimic old functionality,
 * and they don't use kmap_local semantics at all internally; rather they rely
 * on ioremap or friends, which at least on 32-bit add global TLB flushes and
 * points of failure.
 */

static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
                                              struct iosys_map *dmap,
                                              pgoff_t i)
{
        struct ttm_kmap_iter_linear_io *iter_io =
                container_of(iter, typeof(*iter_io), base);

        *dmap = iter_io->dmap;
        iosys_map_incr(dmap, i * PAGE_SIZE);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
        .map_local = ttm_kmap_iter_linear_io_map_local,
        .maps_tt = false,
};

/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It sets up a memcpy kmap iterator
 * pointing at a linear chunk of io memory.
 *
 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
 * failure.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
                             struct ttm_device *bdev,
                             struct ttm_resource *mem)
{
        int ret;

        ret = ttm_mem_io_reserve(bdev, mem);
        if (ret)
                goto out_err;
        if (!mem->bus.is_iomem) {
                ret = -EINVAL;
                goto out_io_free;
        }

        if (mem->bus.addr) {
                iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
                iter_io->needs_unmap = false;
        } else {
                size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

                iter_io->needs_unmap = true;
                memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
                if (mem->bus.caching == ttm_write_combined)
                        iosys_map_set_vaddr_iomem(&iter_io->dmap,
                                                  ioremap_wc(mem->bus.offset,
                                                             bus_size));
                else if (mem->bus.caching == ttm_cached)
                        iosys_map_set_vaddr(&iter_io->dmap,
                                            memremap(mem->bus.offset, bus_size,
                                                     MEMREMAP_WB |
                                                     MEMREMAP_WT |
                                                     MEMREMAP_WC));

                /* If uncached requested or if mapping cached or wc failed */
                if (iosys_map_is_null(&iter_io->dmap))
                        iosys_map_set_vaddr_iomem(&iter_io->dmap,
                                                  ioremap(mem->bus.offset,
                                                          bus_size));

                if (iosys_map_is_null(&iter_io->dmap)) {
                        ret = -ENOMEM;
                        goto out_io_free;
                }
        }

        iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
        return &iter_io->base;

out_io_free:
        ttm_mem_io_free(bdev, mem);
out_err:
        return ERR_PTR(ret);
}

/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to clean up
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It cleans up a memcpy kmap
 * iterator initialized by ttm_kmap_iter_linear_io_init.
 */
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
                             struct ttm_device *bdev,
                             struct ttm_resource *mem)
{
        if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
                if (iter_io->dmap.is_iomem)
                        iounmap(iter_io->dmap.vaddr_iomem);
                else
                        memunmap(iter_io->dmap.vaddr);
        }

        ttm_mem_io_free(bdev, mem);
}

#if defined(CONFIG_DEBUG_FS)

static int ttm_resource_manager_show(struct seq_file *m, void *unused)
{
        struct ttm_resource_manager *man =
                (struct ttm_resource_manager *)m->private;
        struct drm_printer p = drm_seq_file_printer(m);

        ttm_resource_manager_debug(man, &p);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);

#endif

/**
 * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
 * resource manager.
 * @man: The TTM resource manager for which the debugfs stats file will be
 * created.
 * @parent: debugfs directory in which the file will reside
 * @name: The filename to create.
 *
 * This function sets up a debugfs file that can be used to look
 * at debug statistics of the specified ttm_resource_manager.
 */
void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
                                         struct dentry *parent,
                                         const char *name)
{
#if defined(CONFIG_DEBUG_FS)
        debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
#endif
}
EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);
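
/*
 * Usage sketch (illustrative only, not part of the TTM code above): a
 * driver-side walk of a manager's LRU lists using the cursor helpers
 * ttm_resource_manager_first()/ttm_resource_manager_next(). As their lockdep
 * asserts require, &ttm_device.lru_lock must be held across the walk. The
 * my_driver_wants_to_evict() predicate is hypothetical and stands in for
 * whatever per-BO check a driver would actually perform.
 *
 *	struct ttm_resource_cursor cursor;
 *	struct ttm_resource *res;
 *
 *	spin_lock(&bdev->lru_lock);
 *	for (res = ttm_resource_manager_first(man, &cursor); res;
 *	     res = ttm_resource_manager_next(man, &cursor, res)) {
 *		if (my_driver_wants_to_evict(res->bo))
 *			break;
 *	}
 *	spin_unlock(&bdev->lru_lock);
 */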