// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2020 Google LLC
 */
#include <linux/cma.h>
#include <linux/debugfs.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

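/*
 * One pool per GFP zone: an atomic allocation with GFP_DMA or GFP_DMA32
 * must come from memory the device can address, so a separate pool is
 * kept for each zone that is configured in.
 */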
static struct gen_pool *atomic_pool_dma __ro_after_init;
static unsigned long pool_size_dma;
static struct gen_pool *atomic_pool_dma32 __ro_after_init;
static unsigned long pool_size_dma32;
static struct gen_pool *atomic_pool_kernel __ro_after_init;
static unsigned long pool_size_kernel;

/* Size can be defined by the coherent_pool command line */
static size_t atomic_pool_size;

/* Dynamic background expansion when the atomic pool is near capacity */
static struct work_struct atomic_pool_work;

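/*
 * Parse the "coherent_pool=" kernel command line option, e.g.
 * "coherent_pool=256K"; memparse() accepts the usual K/M/G suffixes.
 */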
static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

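/* Expose the size of each pool under /sys/kernel/debug/dma_pools/. */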
static void __init dma_atomic_pool_debugfs_init(void)
{
	struct dentry *root;

	root = debugfs_create_dir("dma_pools", NULL);
	debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
	debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
	debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
}

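/* Attribute newly added memory to the pool matching the gfp zone flags. */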
static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
{
	if (gfp & __GFP_DMA)
		pool_size_dma += size;
	else if (gfp & __GFP_DMA32)
		pool_size_dma32 += size;
	else
		pool_size_kernel += size;
}

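/*
 * Check whether the default CMA area can back a pool with the given gfp
 * mask: its highest address must still satisfy the zone's DMA mask,
 * otherwise pages taken from CMA could be out of reach for the devices
 * the pool is meant to serve.
 */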
static bool cma_in_zone(gfp_t gfp)
{
	unsigned long size;
	phys_addr_t end;
	struct cma *cma;

	cma = dev_get_cma_area(NULL);
	if (!cma)
		return false;

	size = cma_get_size(cma);
	if (!size)
		return false;

	/* CMA can't cross zone boundaries, see cma_activate_area() */
	end = cma_get_base(cma) + size - 1;
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
		return end <= DMA_BIT_MASK(zone_dma_bits);
	if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
		return end <= DMA_BIT_MASK(32);
	return true;
}

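/*
 * Grow @pool by roughly @pool_size bytes: take pages from CMA when the
 * default area lies in the right zone, otherwise from the page
 * allocator, retrying at smaller orders on failure, then make the
 * memory coherent and unencrypted before handing it to the genpool.
 */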
static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
			      gfp_t gfp)
{
	unsigned int order;
	struct page *page = NULL;
	void *addr;
	int ret = -ENOMEM;

	/* Cannot allocate larger than MAX_ORDER-1 */
	order = min(get_order(pool_size), MAX_ORDER-1);

	do {
		pool_size = 1 << (PAGE_SHIFT + order);
		if (cma_in_zone(gfp))
			page = dma_alloc_from_contiguous(NULL, 1 << order,
							 order, false);
		if (!page)
			page = alloc_pages(gfp, order);
	} while (!page && order-- > 0);
	if (!page)
		goto out;

	arch_dma_prep_coherent(page, pool_size);

#ifdef CONFIG_DMA_DIRECT_REMAP
	addr = dma_common_contiguous_remap(page, pool_size,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));
	if (!addr)
		goto free_page;
#else
	addr = page_to_virt(page);
#endif
	/*
	 * Memory in the atomic DMA pools must be unencrypted; the pools
	 * never shrink, so no re-encryption occurs in dma_direct_free().
	 */
	ret = set_memory_decrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (ret)
		goto remove_mapping;
	ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
				pool_size, NUMA_NO_NODE);
	if (ret)
		goto encrypt_mapping;

	dma_atomic_pool_size_add(gfp, pool_size);
	return 0;

encrypt_mapping:
	ret = set_memory_encrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (WARN_ON_ONCE(ret)) {
		/* Decrypt succeeded but encrypt failed; purposely leak */
		goto out;
	}
remove_mapping:
#ifdef CONFIG_DMA_DIRECT_REMAP
	dma_common_free_remap(addr, pool_size);
#endif
free_page: __maybe_unused
	__free_pages(page, order);
out:
	return ret;
}

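/*
 * Background refill, scheduled via atomic_pool_work once an atomic
 * allocation drains a pool below atomic_pool_size; each expansion adds
 * a chunk as large as the pool's current size.
 */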
static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
{
	if (pool && gen_pool_avail(pool) < atomic_pool_size)
		atomic_pool_expand(pool, gen_pool_size(pool), gfp);
}

static void atomic_pool_work_fn(struct work_struct *work)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		atomic_pool_resize(atomic_pool_dma,
				   GFP_KERNEL | GFP_DMA);
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		atomic_pool_resize(atomic_pool_dma32,
				   GFP_KERNEL | GFP_DMA32);
	atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
}

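/*
 * Create one pool and seed it with an initial allocation.  The genpool
 * uses PAGE_SHIFT as its minimum allocation order, and the order-aligned
 * first-fit algorithm aligns each allocation to the order of its size.
 */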
static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
						      gfp_t gfp)
{
	struct gen_pool *pool;
	int ret;

	pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
	if (!pool)
		return NULL;

	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

	ret = atomic_pool_expand(pool, pool_size, gfp);
	if (ret) {
		gen_pool_destroy(pool);
		pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
		       pool_size >> 10, &gfp);
		return NULL;
	}

	pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
		gen_pool_size(pool) >> 10, &gfp);
	return pool;
}

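/*
 * Worked example for the default sizing below, assuming 4 KiB pages:
 * with 4 GiB of RAM, totalram_pages() is about 1Mi, so pages =
 * 1Mi / 8192 = 128 and each pool defaults to 128 << PAGE_SHIFT =
 * 512 KiB, i.e. 128 KiB per GiB, clamped between 128 KiB and
 * MAX_ORDER_NR_PAGES pages.
 */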
static int __init dma_atomic_pool_init(void)
{
	int ret = 0;

	/*
	 * If coherent_pool was not used on the command line, default the
	 * pool sizes to 128 KiB per 1 GiB of memory, with a minimum of
	 * 128 KiB and a maximum of MAX_ORDER_NR_PAGES pages each.
	 */
	if (!atomic_pool_size) {
		unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
		pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
		atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
	}
	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);

	atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
						    GFP_KERNEL);
	if (!atomic_pool_kernel)
		ret = -ENOMEM;
	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA);
		if (!atomic_pool_dma)
			ret = -ENOMEM;
	}
	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA32);
		if (!atomic_pool_dma32)
			ret = -ENOMEM;
	}

	dma_atomic_pool_debugfs_init();
	return ret;
}
postcore_initcall(dma_atomic_pool_init);

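/*
 * Pick the next pool to try: start with the one matching the zone the
 * gfp mask asks for, then fall back toward more restrictive zones.
 * Memory from a narrower zone always satisfies a wider request, never
 * the other way around, so the walk only moves downward and ends with
 * NULL once no candidate is left.
 */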
static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
{
	if (prev == NULL) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
			return atomic_pool_dma32;
		if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
			return atomic_pool_dma;
		return atomic_pool_kernel;
	}
	if (prev == atomic_pool_kernel)
		return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
	if (prev == atomic_pool_dma32)
		return atomic_pool_dma;
	return NULL;
}

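/*
 * Carve @size bytes out of @pool.  The optional @phys_addr_ok callback
 * lets the caller reject a chunk whose physical address the device
 * cannot reach, and a background refill is kicked off once the pool
 * runs low.
 */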
static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
		struct gen_pool *pool, void **cpu_addr,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	unsigned long addr;
	phys_addr_t phys;

	addr = gen_pool_alloc(pool, size);
	if (!addr)
		return NULL;

	phys = gen_pool_virt_to_phys(pool, addr);
	if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
		gen_pool_free(pool, addr, size);
		return NULL;
	}

	if (gen_pool_avail(pool) < atomic_pool_size)
		schedule_work(&atomic_pool_work);

	*cpu_addr = (void *)addr;
	memset(*cpu_addr, 0, size);
	return pfn_to_page(__phys_to_pfn(phys));
}

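/*
 * Allocate from the first pool that matches @gfp and satisfies
 * @phys_addr_ok, walking the dma_guess_pool() fallback chain.  Returns
 * the backing page and stores the kernel address in @cpu_addr, or NULL
 * if every candidate pool fails.
 */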
struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t gfp,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	struct gen_pool *pool = NULL;
	struct page *page;

	while ((pool = dma_guess_pool(pool, gfp))) {
		page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
					     phys_addr_ok);
		if (page)
			return page;
	}

	WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
	return NULL;
}

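/*
 * Return memory to whichever pool it came from.  The zero gfp mask
 * makes dma_guess_pool() start at the kernel pool and walk every pool,
 * so ownership is determined by gen_pool_has_addr() rather than by
 * zone.
 */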
bool dma_free_from_pool(struct device *dev, void *start, size_t size)
{
	struct gen_pool *pool = NULL;

	while ((pool = dma_guess_pool(pool, 0))) {
		if (!gen_pool_has_addr(pool, (unsigned long)start, size))
			continue;
		gen_pool_free(pool, (unsigned long)start, size);
		return true;
	}

	return false;
}