xref: /openbmc/linux/lib/genalloc.c (revision b7019ac5)
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has
 * to be taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/vmalloc.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

/*
 * Try to atomically set the bits in @mask_to_set at @addr.  Returns
 * -EBUSY without modifying anything if any of those bits is already set.
 */
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

/*
 * Try to atomically clear the bits in @mask_to_clear at @addr.  Returns
 * -EBUSY without modifying anything if any of those bits is already clear.
 */
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users race to set the same bit, one of them returns the number of bits
 * that remain to be set; on success 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}
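
/*
 * Worked example (illustrative, assuming 64-bit longs): bitmap_set_ll(map,
 * 62, 4) touches two words.  The first loop iteration sets bits 62-63 of
 * word 0 with mask_to_set = BITMAP_FIRST_WORD_MASK(62) =
 * 0xc000000000000000 and leaves nr = 2; the tail then sets bits 0-1 of
 * word 1 with ~0UL & BITMAP_LAST_WORD_MASK(66) = 0x3.
 */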

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users race to clear the same bit, one of them returns the number of
 * bits that remain to be cleared; otherwise 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
		pool->name = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_owner - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 * @owner: private data the publisher would like to recall at alloc time
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid, void *owner)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = vzalloc_node(nbytes, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	chunk->owner = owner;
	atomic_long_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_owner);
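
/*
 * Example: creating a pool over a hypothetical on-chip SRAM region (an
 * illustrative sketch, not called anywhere in this file; sram_virt,
 * sram_phys and the sizes are made up, and gen_pool_add_virt() is the
 * no-owner wrapper around gen_pool_add_owner() from <linux/genalloc.h>).
 * gen_pool_create(5, -1) gives one bitmap bit per 2^5 = 32 bytes:
 *
 *	struct gen_pool *pool;
 *	int rc;
 *
 *	pool = gen_pool_create(5, -1);
 *	if (!pool)
 *		return -ENOMEM;
 *	rc = gen_pool_add_virt(pool, (unsigned long)sram_virt, sram_phys,
 *			       SZ_4K, -1);
 *	if (rc) {
 *		gen_pool_destroy(pool);
 *		return rc;
 *	}
 */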

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool.  Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		vfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc_algo_owner - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 * @owner: optionally retrieve the chunk owner
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data, void **owner)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_long_read(&chunk->avail))
			continue;

		start_bit = 0;
		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = algo(chunk->bits, end_bit, start_bit,
				 nbits, data, pool, chunk->start_addr);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_long_sub(size, &chunk->avail);
		if (owner)
			*owner = chunk->owner;
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
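
/*
 * Example: allocating from and freeing back to a pool (an illustrative
 * sketch; gen_pool_alloc() and gen_pool_free() are the no-owner wrappers
 * from <linux/genalloc.h> that land here and in gen_pool_free_owner()):
 *
 *	unsigned long addr;
 *
 *	addr = gen_pool_alloc(pool, 256);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	gen_pool_free(pool, addr, 256);
 */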

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc);
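
/*
 * Example: carving a DMA buffer out of a pool (an illustrative sketch;
 * assumes the pool's chunks were added with correct physical addresses
 * via gen_pool_add_virt(), since the dma-view address comes from
 * gen_pool_virt_to_phys()):
 *
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	buf = gen_pool_dma_alloc(pool, 512, &dma);
 *	if (!buf)
 *		return -ENOMEM;
 *	...	program "dma" into the device, use "buf" from the CPU
 */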

/**
 * gen_pool_free_owner - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 * @owner: private data stashed at gen_pool_add() time
 *
 * Free previously allocated special memory back to the specified
 * pool.  Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
		void **owner)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_long_add(size, &chunk->avail);
			if (owner)
				*owner = chunk->owner;
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free_owner);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool:	the generic memory pool
 * @func:	func to call
 * @data:	additional data used by @func
 *
 * Call @func for every chunk of the generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool:	the generic memory pool
 * @start:	start address
 * @size:	size of the region
 *
 * Check if the range of addresses falls within the specified pool.  Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space of
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_long_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size of
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm of
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL, use gen_pool_first_fit as the default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
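
/*
 * Example: switching a pool to the best-fit algorithm (an illustrative
 * sketch; gen_pool_best_fit is defined below and takes no @data, so
 * NULL is passed):
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */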

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment, a struct genpool_data_align
 * @pool: pool to get order from
 * @start_addr: start address of the allocation chunk
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_align *alignment;
	unsigned long align_mask, align_off;
	int order;

	alignment = data;
	order = pool->min_alloc_order;
	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
	align_off = (start_addr & (alignment->align - 1)) >> order;

	return bitmap_find_next_zero_area_off(map, size, start, nr,
					      align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);
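
/*
 * Example: requesting a 256-byte-aligned block (an illustrative sketch;
 * struct genpool_data_align and the gen_pool_alloc_algo() wrapper come
 * from <linux/genalloc.h>):
 *
 *	struct genpool_data_align align_data = { .align = 256 };
 *	unsigned long addr;
 *
 *	addr = gen_pool_alloc_algo(pool, 128, gen_pool_first_fit_align,
 *				   &align_data);
 *	if (!addr)
 *		return -ENOMEM;
 */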

/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for the fixed offset, a struct genpool_data_fixed
 * @pool: pool to get order from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_fixed *fixed_data;
	int order;
	unsigned long offset_bit;
	unsigned long start_bit;

	fixed_data = data;
	order = pool->min_alloc_order;
	offset_bit = fixed_data->offset >> order;
	if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
		return size;

	start_bit = bitmap_find_next_zero_area(map, size,
			start + offset_bit, nr, 0);
	if (start_bit != offset_bit)
		start_bit = size;
	return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);
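
/*
 * Example: reserving a block at a fixed offset within the pool (an
 * illustrative sketch; the offset must be a multiple of the pool's
 * minimum allocation granule, 1 << min_alloc_order, or the WARN_ON
 * above fires and the allocation fails):
 *
 *	struct genpool_data_fixed fixed_data = { .offset = 0x100 };
 *	unsigned long addr;
 *
 *	addr = gen_pool_alloc_algo(pool, 64, gen_pool_fixed_alloc,
 *				   &fixed_data);
 */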

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement.  The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data, struct gen_pool *pool,
		unsigned long start_addr)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 *
 * Iterate over the bitmap to find the smallest free region
 * in which the requested memory can be allocated.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		int next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
	struct gen_pool **p = res;

	/* NULL data matches only a pool without an assigned name */
	if (!data && !(*p)->name)
		return 1;

	if (!data || !(*p)->name)
		return 0;

	return !strcmp((*p)->name, data);
}

673 
674 /**
675  * gen_pool_get - Obtain the gen_pool (if any) for a device
676  * @dev: device to retrieve the gen_pool from
677  * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
678  *
679  * Returns the gen_pool for the device if one is present, or NULL.
680  */
681 struct gen_pool *gen_pool_get(struct device *dev, const char *name)
682 {
683 	struct gen_pool **p;
684 
685 	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
686 			(void *)name);
687 	if (!p)
688 		return NULL;
689 	return *p;
690 }
691 EXPORT_SYMBOL_GPL(gen_pool_get);
692 
693 /**
694  * devm_gen_pool_create - managed gen_pool_create
695  * @dev: device that provides the gen_pool
696  * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
697  * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
698  * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
699  *
700  * Create a new special memory pool that can be used to manage special purpose
701  * memory not managed by the regular kmalloc/kfree interface. The pool will be
702  * automatically destroyed by the device management code.
703  */
704 struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
705 				      int nid, const char *name)
706 {
707 	struct gen_pool **ptr, *pool;
708 	const char *pool_name = NULL;
709 
710 	/* Check that genpool to be created is uniquely addressed on device */
711 	if (gen_pool_get(dev, name))
712 		return ERR_PTR(-EINVAL);
713 
714 	if (name) {
715 		pool_name = kstrdup_const(name, GFP_KERNEL);
716 		if (!pool_name)
717 			return ERR_PTR(-ENOMEM);
718 	}
719 
720 	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
721 	if (!ptr)
722 		goto free_pool_name;
723 
724 	pool = gen_pool_create(min_alloc_order, nid);
725 	if (!pool)
726 		goto free_devres;
727 
728 	*ptr = pool;
729 	pool->name = pool_name;
730 	devres_add(dev, ptr);
731 
732 	return pool;
733 
734 free_devres:
735 	devres_free(ptr);
736 free_pool_name:
737 	kfree_const(pool_name);
738 
739 	return ERR_PTR(-ENOMEM);
740 }
741 EXPORT_SYMBOL(devm_gen_pool_create);
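
/*
 * Example: managed pool creation from a driver probe routine (an
 * illustrative sketch; "foo_probe" and the "sram" name are made up,
 * and ilog2(64) yields a 64-byte minimum allocation granule):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct gen_pool *pool;
 *
 *		pool = devm_gen_pool_create(&pdev->dev, ilog2(64),
 *					    NUMA_NO_NODE, "sram");
 *		if (IS_ERR(pool))
 *			return PTR_ERR(pool);
 *		...
 *	}
 *
 * The pool is destroyed automatically by devres when the device is
 * unbound, so no explicit gen_pool_destroy() call is needed.
 */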

#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool, *parent;
	const char *name = NULL;
	struct gen_pool *pool = NULL;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;

	pdev = of_find_device_by_node(np_pool);
	if (!pdev) {
		/* Check if a named gen_pool was created by the parent node's device */
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);

		of_property_read_string(np_pool, "label", &name);
		if (!name)
			name = np_pool->name;
	}
	if (pdev)
		pool = gen_pool_get(&pdev->dev, name);
	of_node_put(np_pool);

	return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
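
/*
 * Example: resolving a pool from a device-tree phandle (an illustrative
 * sketch; assumes the consumer's node carries a hypothetical "sram"
 * property whose phandle points at the pool provider's node):
 *
 *	struct gen_pool *pool;
 *
 *	pool = of_gen_pool_get(dev->of_node, "sram", 0);
 *	if (!pool)
 *		return -ENODEV;
 */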
#endif /* CONFIG_OF */
785