// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition to that, allocations from the DMA coherent API are pooled as
 * well because they are rather slow compared to alloc_pages+map.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>

#include "ttm_module.h"

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping, with the allocation order
 *	stored in the lower bits
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];

static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into a userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_KSWAPD_RECLAIM;

	if (!pool->use_dma_alloc) {
		p = alloc_pages_node(pool->nid, gfp_flags, order);
		if (p)
			p->private = order;
		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

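	/* The coherent allocation is at least page aligned, so the lower bits
	 * of the vaddr are free to store the allocation order.
	 */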
	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

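	/* The lower bits of vaddr carry the allocation order; mask them off to
	 * get back the CPU address of the coherent allocation.
	 */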
	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int num_pages = last - first;

	if (!num_pages)
		return 0;

	switch (caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(first, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(first, num_pages);
	}
#endif
	return 0;
}

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

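	/* Higher order allocations are physically contiguous, so the DMA
	 * address of each individual page just advances by PAGE_SIZE.
	 */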
	for (i = 1 << order; i ; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	unsigned int i, num_pages = 1 << pt->order;

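	/* Clear the pages before adding them to the pool, since pages taken
	 * from the pool are handed out again without being cleared.
	 */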
	for (i = 0; i < num_pages; ++i) {
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
			clear_page(page_address(p + i));
	}

	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		list_del(&p->lru);
	}
	spin_unlock(&pt->lock);

	return p;
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	spin_lock_init(&pt->lock);
	INIT_LIST_HEAD(&pt->pages);

	spin_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	spin_unlock(&shrinker_lock);

	while ((p = ttm_pool_type_take(pt)))
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->nid != NUMA_NO_NODE)
			return &pool->caching[caching].orders[order];

		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->nid != NUMA_NO_NODE)
			return &pool->caching[caching].orders[order];

		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

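	/* Cached pages need no caching transition, and on !CONFIG_X86 none is
	 * needed at all, so there is nothing to pool here.
	 */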
	return NULL;
}

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_pages;
	struct page *p;

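	/* Shrink the pool type at the head of the list and rotate it to the
	 * tail so all pool types are shrunk in a round robin fashion.
	 */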
	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_pages = 1 << pt->order;
	} else {
		num_pages = 0;
	}

	return num_pages;
}

/* Return the allocation order of a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

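		/* The order is stored in the lower bits of the vaddr */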
		return dma->vaddr & ~PAGE_MASK;
	}

	return p->private;
}

/* Called when we got a page, either from a pool or newly allocated */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
				   struct page *p, dma_addr_t **dma_addr,
				   unsigned long *num_pages,
				   struct page ***pages)
{
	unsigned int i;
	int r;

	if (*dma_addr) {
		r = ttm_pool_map(pool, order, p, dma_addr);
		if (r)
			return r;
	}

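	/* Take the pages off the remaining request and add them to the
	 * caller's page array.
	 */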
	*num_pages -= 1 << order;
	for (i = 1 << order; i; --i, ++(*pages), ++p)
		**pages = p;

	return 0;
}

/**
 * ttm_pool_free_range() - Free a range of TTM pages
 * @pool: The pool used for allocating.
 * @tt: The struct ttm_tt holding the page pointers.
 * @caching: The page caching mode used by the range.
 * @start_page: index for first page to free.
 * @end_page: index for last page to free + 1.
 *
 * During allocation the ttm_tt page-vector may be populated with ranges of
 * pages with different attributes if allocation hit an error without being
 * able to completely fulfill the allocation. This function can be used
 * to free these individual ranges.
 */
static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
				enum ttm_caching caching,
				pgoff_t start_page, pgoff_t end_page)
{
	struct page **pages = &tt->pages[start_page];
	unsigned int order;
	pgoff_t i, nr;

	for (i = start_page; i < end_page; i += nr, pages += nr) {
		struct ttm_pool_type *pt = NULL;

		order = ttm_pool_page_order(pool, *pages);
		nr = (1UL << order);
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], nr);

		pt = ttm_pool_select_type(pool, caching, order);
		if (pt)
			ttm_pool_type_give(pt, *pages);
		else
			ttm_pool_free_page(pool, caching, order, *pages);
	}
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	pgoff_t num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	struct page **caching = tt->pages;
	struct page **pages = tt->pages;
	enum ttm_caching page_caching;
	gfp_t gfp_flags = GFP_USER;
	pgoff_t caching_divide;
	unsigned int order;
	struct page *p;
	int r;

	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
	WARN_ON(dma_addr && !pool->dev);

	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

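	/* Start with the highest order that still fits the request and fall
	 * back to smaller orders whenever an allocation fails.
	 */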
	for (order = min_t(unsigned int, MAX_ORDER, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		struct ttm_pool_type *pt;

		page_caching = tt->caching;
		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;

			caching = pages;
			do {
				r = ttm_pool_page_allocated(pool, order, p,
							    &dma_addr,
							    &num_pages,
							    &pages);
				if (r)
					goto error_free_page;

				caching = pages;
				if (num_pages < (1 << order))
					break;

				p = ttm_pool_type_take(pt);
			} while (p);
		}

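		/* Freshly allocated system pages are still cached; the error
		 * unwind must free them with that caching attribute.
		 */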
		page_caching = ttm_cached;
		while (num_pages >= (1 << order) &&
		       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {

			if (PageHighMem(p)) {
				r = ttm_pool_apply_caching(caching, pages,
							   tt->caching);
				if (r)
					goto error_free_page;
				caching = pages;
			}
			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
						    &num_pages, &pages);
			if (r)
				goto error_free_page;
			if (PageHighMem(p))
				caching = pages;
		}

		if (!p) {
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}
	}

	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;

	return 0;

error_free_page:
	ttm_pool_free_page(pool, page_caching, order, p);

error_free_all:
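	/* Everything before the caching pointer already has the requested
	 * caching applied, the rest of the populated range is still cached.
	 */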
	num_pages = tt->num_pages - num_pages;
	caching_divide = caching - tt->pages;
	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);

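	/* Shrink the pools until we are back below the global limit */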
	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @nid: NUMA node to use for allocations
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   int nid, bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->nid = nid;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		for (j = 0; j < NR_PAGE_ORDERS; ++j) {
			struct ttm_pool_type *pt;

			/* Initialize only pool types which are actually used */
			pt = ttm_pool_select_type(pool, i, j);
			if (pt != &pool->caching[i].orders[j])
				continue;

			ttm_pool_type_init(pt, pool, i, j);
		}
	}
}
EXPORT_SYMBOL(ttm_pool_init);

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		for (j = 0; j < NR_PAGE_ORDERS; ++j) {
			struct ttm_pool_type *pt;

			pt = ttm_pool_select_type(pool, i, j);
			if (pt != &pool->caching[i].orders[j])
				continue;

			ttm_pool_type_fini(pt);
		}
	}

	/* We removed the pool types from the LRU, but we need to also make sure
	 * that no shrinker is concurrently freeing pages from the pool.
	 */
	synchronize_shrinkers();
}
EXPORT_SYMBOL(ttm_pool_fini);

/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct page *p;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	list_for_each_entry(p, &pt->pages, lru)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i < NR_PAGE_ORDERS; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i < NR_PAGE_ORDERS; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	if (!pool->use_dma_alloc) {
		seq_puts(m, "unused\n");
		return 0;
	}

	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = { .gfp_mask = GFP_NOFS };

	fs_reclaim_acquire(GFP_KERNEL);
	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	spin_lock_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#endif

	mm_shrinker.count_objects = ttm_pool_shrinker_count;
	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker.seeks = 1;
	return register_shrinker(&mm_shrinker, "drm-ttm_pool");
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	unregister_shrinker(&mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}