// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2020 Google LLC
 */
#include <linux/cma.h>
#include <linux/debugfs.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct gen_pool *atomic_pool_dma __ro_after_init;
static unsigned long pool_size_dma;
static struct gen_pool *atomic_pool_dma32 __ro_after_init;
static unsigned long pool_size_dma32;
static struct gen_pool *atomic_pool_kernel __ro_after_init;
static unsigned long pool_size_kernel;

/* Size can be defined by the coherent_pool command line */
static size_t atomic_pool_size;

/* Dynamic background expansion when the atomic pool is near capacity */
static struct work_struct atomic_pool_work;

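/* Parse the "coherent_pool=" early parameter into atomic_pool_size. */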
static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

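/* Expose the current pool sizes under /sys/kernel/debug/dma_pools/. */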
static void __init dma_atomic_pool_debugfs_init(void)
{
	struct dentry *root;

	root = debugfs_create_dir("dma_pools", NULL);
	debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
	debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
	debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
}

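/* Account a pool-size change against the zone implied by @gfp. */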
static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
{
	if (gfp & __GFP_DMA)
		pool_size_dma += size;
	else if (gfp & __GFP_DMA32)
		pool_size_dma32 += size;
	else
		pool_size_kernel += size;
}

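/*
 * Check whether the default CMA area lies entirely within the zone implied
 * by @gfp, so that pages taken from it also satisfy the zone restriction.
 */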
static bool cma_in_zone(gfp_t gfp)
{
	unsigned long size;
	phys_addr_t end;
	struct cma *cma;

	cma = dev_get_cma_area(NULL);
	if (!cma)
		return false;

	size = cma_get_size(cma);
	if (!size)
		return false;

	/* CMA can't cross zone boundaries, see cma_activate_area() */
	end = cma_get_base(cma) + size - 1;
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
		return end <= DMA_BIT_MASK(zone_dma_bits);
	if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
		return end <= DMA_BIT_MASK(32);
	return true;
}

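/*
 * Add pages to @pool, preferring CMA when its area is usable for this zone
 * and falling back to smaller allocation orders when an order fails.
 */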
static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
			      gfp_t gfp)
{
	unsigned int order;
	struct page *page = NULL;
	void *addr;
	int ret = -ENOMEM;

	/* Cannot allocate larger than MAX_ORDER */
	order = min(get_order(pool_size), MAX_ORDER);

	do {
		pool_size = 1 << (PAGE_SHIFT + order);
		if (cma_in_zone(gfp))
			page = dma_alloc_from_contiguous(NULL, 1 << order,
							 order, false);
		if (!page)
			page = alloc_pages(gfp, order);
	} while (!page && order-- > 0);
	if (!page)
		goto out;

	arch_dma_prep_coherent(page, pool_size);

#ifdef CONFIG_DMA_DIRECT_REMAP
	addr = dma_common_contiguous_remap(page, pool_size,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));
	if (!addr)
		goto free_page;
#else
	addr = page_to_virt(page);
#endif
	/*
	 * Memory in the atomic DMA pools must be unencrypted, the pools do not
	 * shrink so no re-encryption occurs in dma_direct_free().
	 */
	ret = set_memory_decrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (ret)
		goto remove_mapping;
	ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
				pool_size, NUMA_NO_NODE);
	if (ret)
		goto encrypt_mapping;

	dma_atomic_pool_size_add(gfp, pool_size);
	return 0;

encrypt_mapping:
	ret = set_memory_encrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (WARN_ON_ONCE(ret)) {
		/* Decrypt succeeded but encrypt failed, purposely leak */
		goto out;
	}
remove_mapping:
#ifdef CONFIG_DMA_DIRECT_REMAP
	dma_common_free_remap(addr, pool_size);
free_page:
	__free_pages(page, order);
#endif
out:
	return ret;
}

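/* Expand @pool once its available space has dropped below the target size. */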
static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
{
	if (pool && gen_pool_avail(pool) < atomic_pool_size)
		atomic_pool_expand(pool, gen_pool_size(pool), gfp);
}

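/* Deferred work: top up whichever pools have run low. */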
static void atomic_pool_work_fn(struct work_struct *work)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		atomic_pool_resize(atomic_pool_dma,
				   GFP_KERNEL | GFP_DMA);
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		atomic_pool_resize(atomic_pool_dma32,
				   GFP_KERNEL | GFP_DMA32);
	atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
}

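/* Create a page-granular genpool and seed it with @pool_size bytes. */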
static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
						      gfp_t gfp)
{
	struct gen_pool *pool;
	int ret;

	pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
	if (!pool)
		return NULL;

	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

	ret = atomic_pool_expand(pool, pool_size, gfp);
	if (ret) {
		gen_pool_destroy(pool);
		pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
		       pool_size >> 10, &gfp);
		return NULL;
	}

	pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
		gen_pool_size(pool) >> 10, &gfp);
	return pool;
}

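/* Boot-time setup: create one atomic pool per populated zone, scaled to RAM. */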
static int __init dma_atomic_pool_init(void)
{
	int ret = 0;

	/*
	 * If coherent_pool was not used on the command line, default the pool
	 * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER.
	 */
	if (!atomic_pool_size) {
		unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
		pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
		atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
	}
	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);

	atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
						    GFP_KERNEL);
	if (!atomic_pool_kernel)
		ret = -ENOMEM;
	if (has_managed_dma()) {
		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA);
		if (!atomic_pool_dma)
			ret = -ENOMEM;
	}
	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA32);
		if (!atomic_pool_dma32)
			ret = -ENOMEM;
	}

	dma_atomic_pool_debugfs_init();
	return ret;
}
postcore_initcall(dma_atomic_pool_init);

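/*
 * Iterate the candidate pools for @gfp: pass prev == NULL to get the
 * preferred pool, then feed back the previous return value to walk the
 * lower-zone fallbacks, until NULL is returned.
 */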
static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
{
	if (prev == NULL) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
			return atomic_pool_dma32;
		if (atomic_pool_dma && (gfp & GFP_DMA))
			return atomic_pool_dma;
		return atomic_pool_kernel;
	}
	if (prev == atomic_pool_kernel)
		return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
	if (prev == atomic_pool_dma32)
		return atomic_pool_dma;
	return NULL;
}

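/*
 * Try a single pool: allocate @size bytes, check the physical address
 * against the caller's constraint, and kick the background refill when
 * the pool runs low.
 */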
static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
		struct gen_pool *pool, void **cpu_addr,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	unsigned long addr;
	phys_addr_t phys;

	addr = gen_pool_alloc(pool, size);
	if (!addr)
		return NULL;

	phys = gen_pool_virt_to_phys(pool, addr);
	if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
		gen_pool_free(pool, addr, size);
		return NULL;
	}

	if (gen_pool_avail(pool) < atomic_pool_size)
		schedule_work(&atomic_pool_work);

	*cpu_addr = (void *)addr;
	memset(*cpu_addr, 0, size);
	return pfn_to_page(__phys_to_pfn(phys));
}

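/*
 * Allocate zeroed, DMA-coherent memory without blocking, walking the
 * candidate pools until one satisfies @phys_addr_ok.  A typical call
 * (sketch, cf. dma_direct_alloc_from_pool() in kernel/dma/direct.c):
 *
 *	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
 */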
struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t gfp,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	struct gen_pool *pool = NULL;
	struct page *page;

	while ((pool = dma_guess_pool(pool, gfp))) {
		page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
					     phys_addr_ok);
		if (page)
			return page;
	}

	WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
	return NULL;
}

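/*
 * Free @start back to whichever pool contains it.  Returns false if the
 * memory did not come from an atomic pool, so the caller can free it via
 * the regular path.
 */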
bool dma_free_from_pool(struct device *dev, void *start, size_t size)
{
	struct gen_pool *pool = NULL;

	while ((pool = dma_guess_pool(pool, 0))) {
		if (!gen_pool_has_addr(pool, (unsigned long)start, size))
			continue;
		gen_pool_free(pool, (unsigned long)start, size);
		return true;
	}

	return false;
}