// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition to that, allocations from the DMA coherent API are pooled as
 * well because they are rather slow compared to alloc_pages+map.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_module.h"

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping, with the allocation order
 *	in the lower bits
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[MAX_ORDER];
static struct ttm_pool_type global_uncached[MAX_ORDER];

static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];

static struct mutex shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;
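
/* Note (summary of ttm_pool_select_type() below): on x86, write-combined and
 * uncached pages from pools that use plain alloc_pages() are kept in the
 * global ttm_pool_type arrays above (with separate DMA32 variants), while
 * pools using coherent DMA allocations keep pages in their own per-pool
 * arrays; everything else bypasses pooling and is freed directly.
 */
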
/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into a userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_KSWAPD_RECLAIM;

	if (!pool->use_dma_alloc) {
		p = alloc_pages(gfp_flags, order);
		if (p)
			p->private = order;
		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int num_pages = last - first;

	if (!num_pages)
		return 0;

	switch (caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(first, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(first, num_pages);
	}
#endif
	return 0;
}

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

	for (i = 1 << order; i ; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}
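
/* Note: ttm_pool_map() above fans a single order-N mapping out into 1 << N
 * page-sized DMA addresses, so the caller's dma_addr array always holds one
 * entry per PAGE_SIZE page regardless of the allocation order.
 */
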
/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	unsigned int i, num_pages = 1 << pt->order;

	for (i = 0; i < num_pages; ++i) {
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
			clear_page(page_address(p + i));
	}

	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		list_del(&p->lru);
	}
	spin_unlock(&pt->lock);

	return p;
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	spin_lock_init(&pt->lock);
	INIT_LIST_HEAD(&pt->pages);

	mutex_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	mutex_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct page *p;

	mutex_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	mutex_unlock(&shrinker_lock);

	while ((p = ttm_pool_type_take(pt)))
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

	return NULL;
}

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_freed;
	struct page *p;

	mutex_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_freed = 1 << pt->order;
	} else {
		num_freed = 0;
	}

	list_move_tail(&pt->shrinker_list, &shrinker_list);
	mutex_unlock(&shrinker_lock);

	return num_freed;
}

/* Return the allocation order for a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~PAGE_MASK;
	}

	return p->private;
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	unsigned long num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	struct page **caching = tt->pages;
	struct page **pages = tt->pages;
	gfp_t gfp_flags = GFP_USER;
	unsigned int i, order;
	struct page *p;
	int r;

	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
	WARN_ON(dma_addr && !pool->dev);

	if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

	for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		bool apply_caching = false;
		struct ttm_pool_type *pt;

		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			apply_caching = true;
		} else {
			p = ttm_pool_alloc_page(pool, gfp_flags, order);
			if (p && PageHighMem(p))
				apply_caching = true;
		}

		if (!p) {
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}

		if (apply_caching) {
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;
			caching = pages + (1 << order);
		}

		if (dma_addr) {
			r = ttm_pool_map(pool, order, p, &dma_addr);
			if (r)
				goto error_free_page;
		}

		num_pages -= 1 << order;
		for (i = 1 << order; i; --i)
			*(pages++) = p++;
	}

	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;

	return 0;

error_free_page:
	ttm_pool_free_page(pool, tt->caching, order, p);

error_free_all:
	num_pages = tt->num_pages - num_pages;
	for (i = 0; i < num_pages; ) {
		order = ttm_pool_page_order(pool, tt->pages[i]);
		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
		i += 1 << order;
	}

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	unsigned int i;

	for (i = 0; i < tt->num_pages; ) {
		struct page *p = tt->pages[i];
		unsigned int order, num_pages;
		struct ttm_pool_type *pt;

		order = ttm_pool_page_order(pool, p);
		num_pages = 1ULL << order;
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);

		pt = ttm_pool_select_type(pool, tt->caching, order);
		if (pt)
			ttm_pool_type_give(pt, tt->pages[i]);
		else
			ttm_pool_free_page(pool, tt->caching, order,
					   tt->pages[i]);

		i += num_pages;
	}

	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);
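
/* Example (sketch, not part of the TTM API itself): a driver would typically
 * route its ttm_tt populate/unpopulate callbacks through the pool embedded in
 * its device, roughly like this (the mydrv_* names are hypothetical):
 *
 *	static int mydrv_ttm_tt_populate(struct ttm_bo_device *bdev,
 *					 struct ttm_tt *tt,
 *					 struct ttm_operation_ctx *ctx)
 *	{
 *		return ttm_pool_alloc(&bdev->pool, tt, ctx);
 *	}
 *
 *	static void mydrv_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
 *					    struct ttm_tt *tt)
 *	{
 *		ttm_pool_free(&bdev->pool, tt);
 *	}
 */
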
/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	if (use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_init(&pool->caching[i].orders[j],
						   pool, i, j);
	}
}

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	if (pool->use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_fini(&pool->caching[i].orders[j]);
	}
}

/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct page *p;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	list_for_each_entry(p, &pt->pages, lru)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	mutex_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	mutex_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	if (!pool->use_dma_alloc) {
		seq_puts(m, "unused\n");
		return 0;
	}

	ttm_pool_debugfs_header(m);

	mutex_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	mutex_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = { .gfp_mask = GFP_NOFS };

	fs_reclaim_acquire(GFP_KERNEL);
	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	mutex_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#endif

	mm_shrinker.count_objects = ttm_pool_shrinker_count;
	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker.seeks = 1;
	return register_shrinker(&mm_shrinker);
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	unregister_shrinker(&mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}