// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition to that, allocations from the DMA coherent API are pooled as
 * well because they are rather slow compared to alloc_pages+map.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_tt.h>

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping and order in the lower bits
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);
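
/* Editorial usage note, not part of the original sources: because the
 * parameter above is registered with mode 0644, the pool limit can be read
 * and adjusted at runtime via sysfs, e.g. (assuming TTM is built as the
 * usual "ttm" module):
 *
 *	cat /sys/module/ttm/parameters/page_pool_size
 *	echo 262144 > /sys/module/ttm/parameters/page_pool_size
 *
 * When left at zero (the default), ttm_pool_mgr_init() below initializes it
 * from its num_pages argument.
 */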

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[MAX_ORDER];
static struct ttm_pool_type global_uncached[MAX_ORDER];

static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];

static struct mutex shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into a userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_KSWAPD_RECLAIM;

	if (!pool->use_dma_alloc) {
		p = alloc_pages(gfp_flags, order);
		if (p)
			p->private = order;
		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int num_pages = last - first;

	if (!num_pages)
		return 0;

	switch (caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(first, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(first, num_pages);
	}
#endif
	return 0;
}

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

	for (i = 1 << order; i; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

/* Give pages back to a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	unsigned int i, num_pages = 1 << pt->order;

	for (i = 0; i < num_pages; ++i) {
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
			clear_page(page_address(p + i));
	}

	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		list_del(&p->lru);
	}
	spin_unlock(&pt->lock);

	return p;
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	spin_lock_init(&pt->lock);
	INIT_LIST_HEAD(&pt->pages);

	mutex_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	mutex_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct page *p, *tmp;

	mutex_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	mutex_unlock(&shrinker_lock);

	list_for_each_entry_safe(p, tmp, &pt->pages, lru)
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

	return NULL;
}

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_freed;
	struct page *p;

	mutex_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_freed = 1 << pt->order;
	} else {
		num_freed = 0;
	}

	list_move_tail(&pt->shrinker_list, &shrinker_list);
	mutex_unlock(&shrinker_lock);

	return num_freed;
}

/* Return the allocation order for a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~PAGE_MASK;
	}

	return p->private;
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	unsigned long num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	struct page **caching = tt->pages;
	struct page **pages = tt->pages;
	gfp_t gfp_flags = GFP_USER;
	unsigned int i, order;
	struct page *p;
	int r;

	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
	WARN_ON(dma_addr && !pool->dev);

	if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

	for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		bool apply_caching = false;
		struct ttm_pool_type *pt;

		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			apply_caching = true;
		} else {
			p = ttm_pool_alloc_page(pool, gfp_flags, order);
			if (p && PageHighMem(p))
				apply_caching = true;
		}

		if (!p) {
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}

		if (apply_caching) {
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;
			caching = pages + (1 << order);
		}

		r = ttm_mem_global_alloc_page(&ttm_mem_glob, p,
					      (1 << order) * PAGE_SIZE,
					      ctx);
		if (r)
			goto error_free_page;

		if (dma_addr) {
			r = ttm_pool_map(pool, order, p, &dma_addr);
			if (r)
				goto error_global_free;
		}

		num_pages -= 1 << order;
		for (i = 1 << order; i; --i)
			*(pages++) = p++;
	}

	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;

	return 0;

error_global_free:
	ttm_mem_global_free_page(&ttm_mem_glob, p, (1 << order) * PAGE_SIZE);

error_free_page:
	ttm_pool_free_page(pool, tt->caching, order, p);

error_free_all:
	num_pages = tt->num_pages - num_pages;
	for (i = 0; i < num_pages; ) {
		order = ttm_pool_page_order(pool, tt->pages[i]);
		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
		i += 1 << order;
	}

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);
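
/* Editorial usage sketch, not part of the original file: a driver backend
 * typically wires ttm_pool_alloc()/ttm_pool_free() into its ttm_tt
 * populate/unpopulate paths. The struct my_device, its embedded "pool"
 * member and the helper names below are hypothetical, purely illustrative:
 *
 *	static int my_ttm_tt_populate(struct my_device *mdev,
 *				      struct ttm_tt *tt,
 *				      struct ttm_operation_ctx *ctx)
 *	{
 *		if (ttm_tt_is_populated(tt))
 *			return 0;
 *
 *		return ttm_pool_alloc(&mdev->pool, tt, ctx);
 *	}
 *
 *	static void my_ttm_tt_unpopulate(struct my_device *mdev,
 *					 struct ttm_tt *tt)
 *	{
 *		ttm_pool_free(&mdev->pool, tt);
 *	}
 */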

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	unsigned int i;

	for (i = 0; i < tt->num_pages; ) {
		struct page *p = tt->pages[i];
		unsigned int order, num_pages;
		struct ttm_pool_type *pt;

		order = ttm_pool_page_order(pool, p);
		num_pages = 1ULL << order;
		ttm_mem_global_free_page(&ttm_mem_glob, p,
					 num_pages * PAGE_SIZE);
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);

		pt = ttm_pool_select_type(pool, tt->caching, order);
		if (pt)
			ttm_pool_type_give(pt, tt->pages[i]);
		else
			ttm_pool_free_page(pool, tt->caching, order,
					   tt->pages[i]);

		i += num_pages;
	}

	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
		for (j = 0; j < MAX_ORDER; ++j)
			ttm_pool_type_init(&pool->caching[i].orders[j],
					   pool, i, j);
}

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
		for (j = 0; j < MAX_ORDER; ++j)
			ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
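
/* Editorial usage sketch, not part of the original file: ttm_pool_init() and
 * ttm_pool_fini() bracket the lifetime of a per-device pool; the use_dma_alloc
 * and use_dma32 flags are driver policy. The struct my_device and function
 * names below are hypothetical, purely illustrative:
 *
 *	static int my_device_init(struct my_device *mdev, struct device *dev)
 *	{
 *		ttm_pool_init(&mdev->pool, dev, false, false);
 *		return 0;
 *	}
 *
 *	static void my_device_fini(struct my_device *mdev)
 *	{
 *		ttm_pool_fini(&mdev->pool);
 *	}
 */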

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct page *p;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	list_for_each_entry(p, &pt->pages, lru)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	mutex_lock(&shrinker_lock);

	seq_puts(m, "\t ");
	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");

	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);

	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}

	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);

	mutex_unlock(&shrinker_lock);

	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

#endif

/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	mutex_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

	mm_shrinker.count_objects = ttm_pool_shrinker_count;
	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker.seeks = 1;
	return register_shrinker(&mm_shrinker);
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	unregister_shrinker(&mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}