Lines Matching +full:dma +full:- +full:pool
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
30 * In addition to that, allocations from the DMA coherent API are pooled as well
35 #include <linux/dma-mapping.h>
51 * struct ttm_pool_dma - Helper object for coherent DMA mappings
53 * @addr: original DMA address returned for the mapping
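Only the kernel-doc header of this helper object matched the query; as a reading aid, here is a minimal sketch of what it holds, inferred purely from the dma->addr and dma->vaddr uses in the lines below (the authoritative definition lives near the top of ttm_pool.c):

struct ttm_pool_dma {
	dma_addr_t addr;	/* DMA address returned by dma_alloc_attrs() */
	unsigned long vaddr;	/* kernel virtual address; low bits reused for the allocation order */
};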
63 MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
79 static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
83 struct ttm_pool_dma *dma;
95 if (!pool->use_dma_alloc) {
96 p = alloc_pages_node(pool->nid, gfp_flags, order);
98 p->private = order;
102 dma = kmalloc(sizeof(*dma), GFP_KERNEL);
103 if (!dma)
109 vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
110 &dma->addr, gfp_flags, attr);
114 /* TODO: This is an illegal abuse of the DMA API, but we need to rework
115 * TTM page fault handling and extend the DMA API to clean this up.
122 dma->vaddr = (unsigned long)vaddr | order;
123 p->private = (unsigned long)dma;
127 kfree(dma);
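Lines 122-123 are the interesting trick: for coherent-DMA allocations the order is packed into the low bits of the page-aligned kernel virtual address, and the ttm_pool_dma pointer itself is stashed in page->private. A hedged illustration of the encode/decode pair (the helper names are made up; the real code does this inline and recovers the order later in ttm_pool_page_order()):

/* Hypothetical helpers, for illustration only. */
static void ttm_pool_dma_encode(struct page *p, struct ttm_pool_dma *dma,
				void *vaddr, unsigned int order)
{
	/* vaddr is PAGE_SIZE aligned, so the low PAGE_SHIFT bits are free. */
	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
}

static unsigned int ttm_pool_dma_decode_order(struct page *p)
{
	struct ttm_pool_dma *dma = (void *)p->private;

	return dma->vaddr & ~PAGE_MASK;		/* only the low bits hold the order */
}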
132 static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
136 struct ttm_pool_dma *dma;
147 if (!pool || !pool->use_dma_alloc) {
155 dma = (void *)p->private;
156 vaddr = (void *)(dma->vaddr & PAGE_MASK);
157 dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
159 kfree(dma);
167 unsigned int num_pages = last - first;
184 /* Map pages of 1 << order size and fill the DMA address array */
185 static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
191 if (pool->use_dma_alloc) {
192 struct ttm_pool_dma *dma = (void *)p->private;
194 addr = dma->addr;
198 addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
199 if (dma_mapping_error(pool->dev, addr))
200 return -EFAULT;
203 for (i = 1 << order; i ; --i) {
212 static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
216 if (pool->use_dma_alloc)
219 dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
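Line 203 is the head of the loop that fans a single higher-order mapping out into per-page entries of the caller's DMA-address array; roughly (a simplified sketch of the rest of ttm_pool_map(), not a verbatim quote):

	/* One dma_addr_t per PAGE_SIZE page, all backed by the same order-N mapping. */
	for (i = 1 << order; i; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}
	return 0;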
226 unsigned int i, num_pages = 1 << pt->order;
235 spin_lock(&pt->lock);
236 list_add(&p->lru, &pt->pages);
237 spin_unlock(&pt->lock);
238 atomic_long_add(1 << pt->order, &allocated_pages);
246 spin_lock(&pt->lock);
247 p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
249 atomic_long_sub(1 << pt->order, &allocated_pages);
250 list_del(&p->lru);
252 spin_unlock(&pt->lock);
257 /* Initialize and add a pool type to the global shrinker list */
258 static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
261 pt->pool = pool;
262 pt->caching = caching;
263 pt->order = order;
264 spin_lock_init(&pt->lock);
265 INIT_LIST_HEAD(&pt->pages);
268 list_add_tail(&pt->shrinker_list, &shrinker_list);
278 list_del(&pt->shrinker_list);
282 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
286 static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
290 if (pool->use_dma_alloc)
291 return &pool->caching[caching].orders[order];
296 if (pool->nid != NUMA_NO_NODE)
297 return &pool->caching[caching].orders[order];
299 if (pool->use_dma32)
304 if (pool->nid != NUMA_NO_NODE)
305 return &pool->caching[caching].orders[order];
307 if (pool->use_dma32)
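Pieced together, the selection logic above boils down to: pools that use coherent DMA or a specific NUMA node keep per-pool types for every caching/order combination, everything else falls back to global pools, with a DMA32 variant when needed. A simplified reconstruction (the real function switches on the caching mode and returns NULL for combinations that have no pool; the global_* arrays are file-scope state that did not match this query):

	if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
		return &pool->caching[caching].orders[order];

	/* e.g. for write-combined pages; uncached has an equivalent pair */
	return pool->use_dma32 ? &global_dma32_write_combined[order] :
				 &global_write_combined[order];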
328 list_move_tail(&pt->shrinker_list, &shrinker_list);
333 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
334 num_pages = 1 << pt->order;
343 static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
345 if (pool->use_dma_alloc) {
346 struct ttm_pool_dma *dma = (void *)p->private;
348 return dma->vaddr & ~PAGE_MASK;
351 return p->private;
354 /* Called when we got a page, either from a pool or newly allocated */
355 static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
364 r = ttm_pool_map(pool, order, p, dma_addr);
369 *num_pages -= 1 << order;
370 for (i = 1 << order; i; --i, ++(*pages), ++p)
377 * ttm_pool_free_range() - Free a range of TTM pages
378 * @pool: The pool used for allocating.
384 * During allocation the ttm_tt page-vector may be populated with ranges of
389 static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
393 struct page **pages = &tt->pages[start_page];
400 order = ttm_pool_page_order(pool, *pages);
402 if (tt->dma_address)
403 ttm_pool_unmap(pool, tt->dma_address[i], nr);
405 pt = ttm_pool_select_type(pool, caching, order);
409 ttm_pool_free_page(pool, caching, order, *pages);
414 * ttm_pool_alloc - Fill a ttm_tt object
416 * @pool: ttm_pool to use
420 * Fill the ttm_tt object with pages and also make sure to DMA map them when
425 int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
428 pgoff_t num_pages = tt->num_pages;
429 dma_addr_t *dma_addr = tt->dma_address;
430 struct page **caching = tt->pages;
431 struct page **pages = tt->pages;
440 WARN_ON(dma_addr && !pool->dev);
442 if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
445 if (ctx->gfp_retry_mayfail)
448 if (pool->use_dma32)
458 page_caching = tt->caching;
459 pt = ttm_pool_select_type(pool, tt->caching, order);
463 tt->caching);
469 r = ttm_pool_page_allocated(pool, order, p,
486 (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
490 tt->caching);
495 r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
505 --order;
508 r = -ENOMEM;
513 r = ttm_pool_apply_caching(caching, pages, tt->caching);
520 ttm_pool_free_page(pool, page_caching, order, p);
523 num_pages = tt->num_pages - num_pages;
524 caching_divide = caching - tt->pages;
525 ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
526 ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);
533 * ttm_pool_free - Free the backing pages from a ttm_tt object
535 * @pool: Pool to give pages back to.
538 * Give the backing pages back to a pool or free them
540 void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
542 ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
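A minimal usage sketch for the two entry points above, wired into a driver's ttm_device_funcs callbacks; it assumes the pool embedded in struct ttm_device is used, as the default TTM paths do (the my_* names are illustrative, not taken from any particular driver):

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>

static int my_ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *tt,
			      struct ttm_operation_ctx *ctx)
{
	/* Fills tt->pages (and tt->dma_address, if present) from the pool. */
	return ttm_pool_alloc(&bdev->pool, tt, ctx);
}

static void my_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *tt)
{
	/* Returns the pages to the pool (or frees them) and drops the mappings. */
	ttm_pool_free(&bdev->pool, tt);
}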
550 * ttm_pool_init - Initialize a pool
552 * @pool: the pool to initialize
553 * @dev: device for DMA allocations and mappings
555 * @use_dma_alloc: true if coherent DMA alloc should be used
558 * Initialize the pool and its pool types.
560 void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
567 pool->dev = dev;
568 pool->nid = nid;
569 pool->use_dma_alloc = use_dma_alloc;
570 pool->use_dma32 = use_dma32;
576 /* Initialize only pool types which are actually used */
577 pt = ttm_pool_select_type(pool, i, j);
578 if (pt != &pool->caching[i].orders[j])
581 ttm_pool_type_init(pt, pool, i, j);
588 * ttm_pool_fini - Cleanup a pool
590 * @pool: the pool to clean up
592 * Free all pages in the pool and unregister the types from the global
595 void ttm_pool_fini(struct ttm_pool *pool)
603 pt = ttm_pool_select_type(pool, i, j);
604 if (pt != &pool->caching[i].orders[j])
611 /* We removed the pool types from the LRU, but we need to also make sure
612 * that no shrinker is concurrently freeing pages from the pool.
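Lines 567-570 show which values ttm_pool_init() stores, which suggests the call shape below. A minimal sketch of driver-side setup and teardown, assuming the (pool, dev, nid, use_dma_alloc, use_dma32) argument order implied by those assignments:

	struct ttm_pool pool;

	/* Coherent-DMA backed pool, no NUMA affinity, no DMA32 restriction. */
	ttm_pool_init(&pool, dev, NUMA_NO_NODE, true, false);

	/* ... populate and free ttm_tt objects against the pool ... */

	/* Drains every per-pool page list and unregisters the pool types. */
	ttm_pool_fini(&pool);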
647 spin_lock(&pt->lock);
649 list_for_each_entry(p, &pt->pages, lru)
651 spin_unlock(&pt->lock);
663 seq_printf(m, " ---%2u---", i);
667 /* Dump information about the different pool types */
708 * ttm_pool_debugfs - Debugfs dump function for a pool
710 * @pool: the pool to dump the information for
713 * Make a debugfs dump with the per pool and global information.
715 int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
719 if (!pool->use_dma_alloc) {
728 seq_puts(m, "DMA ");
740 ttm_pool_debugfs_orders(pool->caching[i].orders, m);
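For completeness, a hedged example of how a driver could expose this dump through debugfs with the standard seq_file helpers (the wrapper, file name, and my_debugfs_root are made up here):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int my_pool_debugfs_show(struct seq_file *m, void *data)
{
	struct ttm_pool *pool = m->private;	/* passed via debugfs_create_file() below */

	return ttm_pool_debugfs(pool, m);
}
DEFINE_SHOW_ATTRIBUTE(my_pool_debugfs);

/* during driver init: */
debugfs_create_file("page_pool", 0444, my_debugfs_root, pool,
		    &my_pool_debugfs_fops);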
766 * ttm_pool_mgr_init - Initialize globals
803 return register_shrinker(&mm_shrinker, "drm-ttm_pool");
807 * ttm_pool_mgr_fini - Finalize globals