xref: /openbmc/linux/kernel/dma/contiguous.c (revision f875db4f)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * Contiguous Memory Allocator
 *
 *   The Contiguous Memory Allocator (CMA) makes it possible to
 *   allocate big contiguous chunks of memory after the system has
 *   booted.
 *
 * Why is it needed?
 *
 *   Various devices on embedded systems have no scatter-gather and/or
 *   IO map support and require contiguous blocks of memory to
 *   operate.  They include devices such as cameras, hardware video
 *   coders, etc.
 *
 *   Such devices often require big memory buffers (a full HD frame
 *   is, for instance, more than 2 megapixels large, i.e. more than 6
 *   MB of memory), which makes mechanisms such as kmalloc() or
 *   alloc_page() ineffective.
 *
 *   At the same time, a solution where a big memory region is
 *   reserved for a device is suboptimal since often more memory is
 *   reserved than strictly required and, moreover, the memory is
 *   inaccessible to the page allocator even if the device driver
 *   doesn't use it.
 *
 *   CMA tries to solve this issue by operating on memory regions
 *   where only movable pages can be allocated from.  This way, the
 *   kernel can use the memory for pagecache and when a device driver
 *   requests it, allocated pages can be migrated.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/dma-map-ops.h>
#include <linux/cma.h>
#include <linux/nospec.h>

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

struct cma *dma_contiguous_default_area;

/*
 * The default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their
 * system should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes __initconst =
	(phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline __initdata = -1;
static phys_addr_t base_cmdline __initdata;
static phys_addr_t limit_cmdline __initdata;

static int __init early_cma(char *p)
{
	if (!p) {
		pr_err("Config string not provided\n");
		return -EINVAL;
	}

	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
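
/*
 * Illustrative examples (not part of the original file) of the "cma="
 * early parameter parsed above, using the usual memparse() size suffixes:
 *
 *   cma=64M              - reserve 64 MiB anywhere below the arch limit
 *   cma=64M@0x20000000   - reserve 64 MiB at exactly 0x20000000 (fixed,
 *                          since base + size equals the implied limit)
 *   cma=64M@0x20000000-0x40000000
 *                        - reserve 64 MiB somewhere within that range
 */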

#ifdef CONFIG_DMA_NUMA_CMA

static struct cma *dma_contiguous_numa_area[MAX_NUMNODES];
static phys_addr_t numa_cma_size[MAX_NUMNODES] __initdata;
static struct cma *dma_contiguous_pernuma_area[MAX_NUMNODES];
static phys_addr_t pernuma_size_bytes __initdata;

static int __init early_numa_cma(char *p)
{
	int nid, count = 0;
	unsigned long tmp;
	char *s = p;

	while (*s) {
		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
			break;

		if (s[count] == ':') {
			if (tmp >= MAX_NUMNODES)
				break;
			nid = array_index_nospec(tmp, MAX_NUMNODES);

			s += count + 1;
			tmp = memparse(s, &s);
			numa_cma_size[nid] = tmp;

			if (*s == ',')
				s++;
			else
				break;
		} else {
			break;
		}
	}

	return 0;
}
early_param("numa_cma", early_numa_cma);
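
/*
 * Illustrative example (not part of the original file): the "numa_cma="
 * parameter parsed above takes a comma-separated list of <node>:<size>
 * pairs, e.g.
 *
 *   numa_cma=0:64M,1:128M
 *
 * which requests a 64 MiB CMA area on node 0 and a 128 MiB area on node 1.
 */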

static int __init early_cma_pernuma(char *p)
{
	pernuma_size_bytes = memparse(p, &p);
	return 0;
}
early_param("cma_pernuma", early_cma_pernuma);
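
/*
 * Illustrative example (not part of the original file):
 *
 *   cma_pernuma=16M
 *
 * requests a separate 16 MiB CMA area on every online NUMA node.
 */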
#endif

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	unsigned long total_pages = PHYS_PFN(memblock_phys_mem_size());

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif
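
/*
 * Worked example (not from the original file): with 8 GiB of memblock
 * memory, 4 KiB pages and CONFIG_CMA_SIZE_PERCENTAGE=10, total_pages is
 * 2097152, so the helper above returns 209715 << PAGE_SHIFT, i.e. roughly
 * 819 MiB for the default area.
 */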

#ifdef CONFIG_DMA_NUMA_CMA
static void __init dma_numa_cma_reserve(void)
{
	int nid;

	for_each_node(nid) {
		int ret;
		char name[CMA_MAX_NAME];
		struct cma **cma;

		if (!node_online(nid)) {
			if (pernuma_size_bytes || numa_cma_size[nid])
				pr_warn("invalid node %d specified\n", nid);
			continue;
		}

		if (pernuma_size_bytes) {
			cma = &dma_contiguous_pernuma_area[nid];
			snprintf(name, sizeof(name), "pernuma%d", nid);
			ret = cma_declare_contiguous_nid(0, pernuma_size_bytes, 0, 0,
							 0, false, name, cma, nid);
			if (ret)
				pr_warn("%s: reservation failed: err %d, node %d\n", __func__,
					ret, nid);
		}

		if (numa_cma_size[nid]) {
			cma = &dma_contiguous_numa_area[nid];
			snprintf(name, sizeof(name), "numa%d", nid);
			ret = cma_declare_contiguous_nid(0, numa_cma_size[nid], 0, 0, 0, false,
							 name, cma, nid);
			if (ret)
				pr_warn("%s: reservation failed: err %d, node %d\n", __func__,
					ret, nid);
		}
	}
}
#else
static inline void __init dma_numa_cma_reserve(void)
{
}
#endif

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	dma_numa_cma_reserve();

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}
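
/*
 * Hedged usage sketch (not part of this file): architectures typically call
 * dma_contiguous_reserve() from their early memory init, passing the highest
 * address DMA-capable devices can reach, roughly:
 *
 *	void __init arch_mem_init(void)			// hypothetical hook
 *	{
 *		...
 *		dma_contiguous_reserve(arch_dma_phys_limit);	// assumed symbol
 *	}
 */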

void __weak
dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
}

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creating custom reserved areas for specific
 * devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
					"reserved", res_cma);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(cma_get_base(*res_cma),
				cma_get_size(*res_cma));

	return 0;
}
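
/*
 * Hedged usage sketch (not part of this file), assuming a made-up platform
 * that wants a dedicated 32 MiB area somewhere below 1 GiB:
 *
 *	static struct cma *my_cma;			// hypothetical name
 *
 *	void __init my_platform_reserve(void)		// hypothetical hook
 *	{
 *		if (dma_contiguous_reserve_area(SZ_32M, 0, SZ_1G,
 *						&my_cma, false))
 *			pr_warn("my_platform: CMA reservation failed\n");
 *	}
 *
 * A device is then pointed at the area by assigning it to dev->cma_area,
 * as rmem_cma_device_init() does further down in this file.
 */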

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation.
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the architecture-specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int align, bool no_warn)
{
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return cma_release(dev_get_cma_area(dev), pages, count);
}
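
/*
 * Hedged usage sketch (not part of this file): a caller that needs eight
 * physically contiguous pages could do, roughly:
 *
 *	struct page *pages;				// hypothetical caller
 *
 *	pages = dma_alloc_from_contiguous(dev, 8, 0, false);
 *	if (!pages)
 *		return -ENOMEM;
 *	...
 *	dma_release_from_contiguous(dev, pages, 8);
 *
 * Normally these helpers are used by the core DMA mapping and arch code
 * rather than called by drivers directly.
 */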

static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
{
	unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT);

	return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN);
}
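
/*
 * Worked example (not from the original file): for a 1 MiB request with
 * 4 KiB pages, get_order(SZ_1M) is 8, so the alignment passed to cma_alloc()
 * above is min(8, CONFIG_CMA_ALIGNMENT) and the page count is 256.
 */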

/**
 * dma_alloc_contiguous() - allocate contiguous pages
 * @dev:   Pointer to device for which the allocation is performed.
 * @size:  Requested allocation size.
 * @gfp:   Allocation flags.
 *
 * Tries to use the device-specific contiguous memory area if available,
 * then the per-NUMA CMA areas; if those allocations fail, it falls back to
 * the default global area.
 *
 * Note that it bypasses single-page allocations from the per-NUMA and
 * global areas, since the addresses within one page are always contiguous
 * anyway; this avoids wasting CMA pages on such requests and also helps
 * reduce fragmentation.
 */
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{
#ifdef CONFIG_DMA_NUMA_CMA
	int nid = dev_to_node(dev);
#endif

	/* CMA can be used only in the context which permits sleeping */
	if (!gfpflags_allow_blocking(gfp))
		return NULL;
	if (dev->cma_area)
		return cma_alloc_aligned(dev->cma_area, size, gfp);
	if (size <= PAGE_SIZE)
		return NULL;

#ifdef CONFIG_DMA_NUMA_CMA
	if (nid != NUMA_NO_NODE && !(gfp & (GFP_DMA | GFP_DMA32))) {
		struct cma *cma = dma_contiguous_pernuma_area[nid];
		struct page *page;

		if (cma) {
			page = cma_alloc_aligned(cma, size, gfp);
			if (page)
				return page;
		}

		cma = dma_contiguous_numa_area[nid];
		if (cma) {
			page = cma_alloc_aligned(cma, size, gfp);
			if (page)
				return page;
		}
	}
#endif
	if (!dma_contiguous_default_area)
		return NULL;

	return cma_alloc_aligned(dma_contiguous_default_area, size, gfp);
}

/**
 * dma_free_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @page:  Pointer to the allocated pages.
 * @size:  Size of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_contiguous(). Since
 * cma_release() returns false when the provided pages do not belong to the
 * contiguous area and true otherwise, this function falls back to
 * __free_pages() on a false return.
 */
void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* if dev has its own cma, free page from there */
	if (dev->cma_area) {
		if (cma_release(dev->cma_area, page, count))
			return;
	} else {
		/*
		 * otherwise, page is from either per-numa cma or default cma
		 */
#ifdef CONFIG_DMA_NUMA_CMA
		if (cma_release(dma_contiguous_pernuma_area[page_to_nid(page)],
					page, count))
			return;
		if (cma_release(dma_contiguous_numa_area[page_to_nid(page)],
					page, count))
			return;
#endif
		if (cma_release(dma_contiguous_default_area, page, count))
			return;
	}

	/* not in any cma, free from buddy */
	__free_pages(page, get_order(size));
}
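
/*
 * Hedged usage sketch (not part of this file): dma_alloc_contiguous() and
 * dma_free_contiguous() are meant to be paired by the DMA API internals,
 * roughly like:
 *
 *	struct page *page;
 *
 *	page = dma_alloc_contiguous(dev, PAGE_ALIGN(size), GFP_KERNEL);
 *	if (!page)
 *		page = alloc_pages(GFP_KERNEL, get_order(size));  // caller's fallback
 *	...
 *	dma_free_contiguous(dev, page, PAGE_ALIGN(size));
 *
 * dma_free_contiguous() copes with either origin, since it falls back to
 * __free_pages() when the pages are not in any CMA area.
 */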

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#undef pr_fmt
#define pr_fmt(fmt) fmt

static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	dev->cma_area = rmem->priv;
	return 0;
}

static void rmem_cma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev->cma_area = NULL;
}

static const struct reserved_mem_ops rmem_cma_ops = {
	.device_init	= rmem_cma_device_init,
	.device_release = rmem_cma_device_release,
};

static int __init rmem_cma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;
	bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
	struct cma *cma;
	int err;

	if (size_cmdline != -1 && default_cma) {
		pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n",
			rmem->name);
		return -EBUSY;
	}

	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if (!IS_ALIGNED(rmem->base | rmem->size, CMA_MIN_ALIGNMENT_BYTES)) {
		pr_err("Reserved memory: incorrect alignment of CMA region\n");
		return -EINVAL;
	}

	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
	if (err) {
		pr_err("Reserved memory: unable to setup CMA region\n");
		return err;
	}
	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(rmem->base, rmem->size);

	if (default_cma)
		dma_contiguous_default_area = cma;

	rmem->ops = &rmem_cma_ops;
	rmem->priv = cma;

	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);

	return 0;
}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
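
/*
 * Illustrative device tree snippet (not part of this file), assuming a
 * board that wants a 64 MiB default CMA pool handled by rmem_cma_setup()
 * above via the "shared-dma-pool" compatible:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		linux,cma {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			linux,cma-default;
 *		};
 *	};
 *
 * The node must be "reusable" and must not be "no-map", matching the checks
 * in rmem_cma_setup().
 */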
#endif