xref: /openbmc/linux/lib/genalloc.c (revision 7fe2f639)
/*
 * Basic general purpose allocator for managing special purpose memory
 * not managed by the regular kmalloc/kfree interface.
 * Uses for this include on-device special memory, uncached memory,
 * etc.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitmap.h>
#include <linux/genalloc.h>


/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		rwlock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);
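
/*
 * Illustrative usage sketch, not part of this file: a driver typically
 * creates one pool per region it manages.  With min_alloc_order == 5,
 * each bitmap bit covers 2^5 == 32 bytes; nid == -1 lets the pool
 * structure be allocated on any node.  The "example_" name is
 * hypothetical.
 */
static __maybe_unused struct gen_pool *example_create_pool(void)
{
	return gen_pool_create(5, -1);
}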

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;

	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	spin_lock_init(&chunk->lock);
	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size;

	write_lock(&pool->lock);
	list_add(&chunk->next_chunk, &pool->chunks);
	write_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);
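
/*
 * Illustrative usage sketch, not part of this file: registering a chunk
 * of device memory with a pool.  The parameters are placeholders a real
 * driver would obtain from its resource and mapping; the "example_" name
 * is hypothetical.
 */
static __maybe_unused int example_add_chunk(struct gen_pool *pool,
					    unsigned long virt,
					    phys_addr_t phys,
					    size_t size)
{
	/* size should be a multiple of 1 << pool->min_alloc_order */
	return gen_pool_add_virt(pool, virt, phys, size, -1);
}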

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to look up
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct list_head *_chunk;
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	read_lock(&pool->lock);
	list_for_each(_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	read_unlock(&pool->lock);

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
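
/*
 * Illustrative usage sketch, not part of this file: translating a pool
 * allocation back to its physical address, e.g. before programming a
 * DMA engine.  The "example_" name is hypothetical.
 */
static __maybe_unused int example_virt_to_dma(struct gen_pool *pool,
					      unsigned long vaddr,
					      phys_addr_t *paddr)
{
	phys_addr_t phys = gen_pool_virt_to_phys(pool, vaddr);

	/* -1 means vaddr does not fall inside any chunk of this pool */
	if (phys == (phys_addr_t)-1)
		return -EINVAL;

	*paddr = phys;
	return 0;
}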

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);
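
/*
 * Illustrative usage sketch, not part of this file: typical pool setup
 * with error unwinding.  Destroying the pool on the failure path is safe
 * because no allocations have been handed out yet.  The "example_" name
 * and the region parameters are hypothetical.
 */
static __maybe_unused struct gen_pool *example_setup_pool(unsigned long virt,
							  phys_addr_t phys,
							  size_t size)
{
	struct gen_pool *pool = gen_pool_create(5, -1);

	if (pool == NULL)
		return NULL;

	if (gen_pool_add_virt(pool, virt, phys, size, -1)) {
		gen_pool_destroy(pool);
		return NULL;
	}

	return pool;
}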

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm.  Returns the address of the allocated
 * memory, or 0 on failure.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	struct list_head *_chunk;
	struct gen_pool_chunk *chunk;
	unsigned long addr, flags;
	int order = pool->min_alloc_order;
	int nbits, start_bit, end_bit;

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;

	read_lock(&pool->lock);
	list_for_each(_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

		end_bit = (chunk->end_addr - chunk->start_addr) >> order;

		spin_lock_irqsave(&chunk->lock, flags);
		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
						nbits, 0);
		if (start_bit >= end_bit) {
			spin_unlock_irqrestore(&chunk->lock, flags);
			continue;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);

		bitmap_set(chunk->bits, start_bit, nbits);
		spin_unlock_irqrestore(&chunk->lock, flags);
		read_unlock(&pool->lock);
		return addr;
	}
	read_unlock(&pool->lock);
	return 0;
}
EXPORT_SYMBOL(gen_pool_alloc);
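
/*
 * Illustrative usage sketch, not part of this file: allocating a buffer
 * from the pool.  A return value of 0 means no chunk had a large enough
 * run of free blocks, so callers must treat 0 as failure (and therefore
 * should not add a chunk in which 0 would be a valid address).  The
 * "example_" name and the 256-byte size are hypothetical.
 */
static __maybe_unused unsigned long example_alloc_buffer(struct gen_pool *pool)
{
	unsigned long buf = gen_pool_alloc(pool, 256);

	if (!buf)
		pr_err("example: special memory pool exhausted\n");

	return buf;
}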

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified pool.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct list_head *_chunk;
	struct gen_pool_chunk *chunk;
	unsigned long flags;
	int order = pool->min_alloc_order;
	int bit, nbits;

	nbits = (size + (1UL << order) - 1) >> order;

	read_lock(&pool->lock);
	list_for_each(_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			BUG_ON(addr + size > chunk->end_addr);
			spin_lock_irqsave(&chunk->lock, flags);
			bit = (addr - chunk->start_addr) >> order;
			while (nbits--)
				__clear_bit(bit++, chunk->bits);
			spin_unlock_irqrestore(&chunk->lock, flags);
			break;
		}
	}
	BUG_ON(nbits > 0);
	read_unlock(&pool->lock);
}
EXPORT_SYMBOL(gen_pool_free);
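
/*
 * Illustrative usage sketch, not part of this file: a full allocate/use/
 * free round trip.  The size passed to gen_pool_free() must match the
 * size that was allocated, since the pool does not record per-allocation
 * sizes.  The "example_" name and the 128-byte size are hypothetical.
 */
static __maybe_unused void example_round_trip(struct gen_pool *pool)
{
	unsigned long addr = gen_pool_alloc(pool, 128);

	if (!addr)
		return;

	/* ... use the 128 bytes at addr ... */

	gen_pool_free(pool, addr, 128);
}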